Build a static version of libchrome. am: e31114d100
am: cf06157e18  -s ours

Change-Id: I799a0031ae47552b6785d436ff0739b100817ef4
diff --git a/Android.mk b/Android.mk
index e934cd9..952bca5 100644
--- a/Android.mk
+++ b/Android.mk
@@ -29,12 +29,10 @@
 	-Wno-missing-field-initializers
 libchromeCommonCFlags := -Wall -Werror
 libchromeCommonCIncludes := \
-	external/gmock/include \
-	external/gtest/include \
 	external/valgrind/include \
 	external/valgrind \
 
-libchromeExportedCIncludes := $(LOCAL_PATH) $(TOP)/external/gtest/include
+libchromeExportedCIncludes := $(LOCAL_PATH)
 
 libchromeCommonSrc := \
 	base/at_exit.cc \
@@ -65,10 +63,12 @@
 	base/files/file_util.cc \
 	base/files/file_util_posix.cc \
 	base/files/important_file_writer.cc \
+	base/files/memory_mapped_file.cc \
+	base/files/memory_mapped_file_posix.cc \
 	base/files/scoped_file.cc \
 	base/files/scoped_temp_dir.cc \
 	base/guid.cc \
-	base/guid_posix.cc \
+	base/hash.cc \
 	base/json/json_file_value_serializer.cc \
 	base/json/json_parser.cc \
 	base/json/json_reader.cc \
@@ -98,6 +98,9 @@
 	base/metrics/histogram.cc \
 	base/metrics/histogram_samples.cc \
 	base/metrics/histogram_snapshot_manager.cc \
+	base/metrics/persistent_histogram_allocator.cc \
+	base/metrics/persistent_memory_allocator.cc \
+	base/metrics/persistent_sample_map.cc \
 	base/metrics/sample_map.cc \
 	base/metrics/sample_vector.cc \
 	base/metrics/sparse_histogram.cc \
@@ -116,7 +119,6 @@
 	base/process/process_metrics.cc \
 	base/process/process_metrics_posix.cc \
 	base/process/process_posix.cc \
-	base/profiler/alternate_timer.cc \
 	base/profiler/scoped_profile.cc \
 	base/profiler/scoped_tracker.cc \
 	base/profiler/tracked_time.cc \
@@ -141,17 +143,24 @@
 	base/synchronization/condition_variable_posix.cc \
 	base/synchronization/lock.cc \
 	base/synchronization/lock_impl_posix.cc \
+	base/synchronization/read_write_lock_posix.cc \
 	base/synchronization/waitable_event_posix.cc \
 	base/sync_socket_posix.cc \
 	base/sys_info.cc \
 	base/sys_info_posix.cc \
 	base/task/cancelable_task_tracker.cc \
 	base/task_runner.cc \
+	base/task_scheduler/scheduler_lock_impl.cc \
+	base/task_scheduler/sequence.cc \
+	base/task_scheduler/sequence_sort_key.cc \
+	base/task_scheduler/task.cc \
+	base/task_scheduler/task_traits.cc \
 	base/third_party/icu/icu_utf.cc \
 	base/third_party/nspr/prtime.cc \
 	base/threading/non_thread_safe_impl.cc \
 	base/threading/platform_thread_posix.cc \
 	base/threading/post_task_and_reply_impl.cc \
+	base/threading/sequenced_task_runner_handle.cc \
 	base/threading/sequenced_worker_pool.cc \
 	base/threading/simple_thread.cc \
 	base/threading/thread.cc \
@@ -162,9 +171,9 @@
 	base/threading/thread_local_storage.cc \
 	base/threading/thread_local_storage_posix.cc \
 	base/threading/thread_restrictions.cc \
+	base/threading/thread_task_runner_handle.cc \
 	base/threading/worker_pool.cc \
 	base/threading/worker_pool_posix.cc \
-	base/thread_task_runner_handle.cc \
 	base/time/clock.cc \
 	base/time/default_clock.cc \
 	base/time/default_tick_clock.cc \
@@ -175,19 +184,21 @@
 	base/timer/timer.cc \
 	base/trace_event/heap_profiler_allocation_context.cc \
 	base/trace_event/heap_profiler_allocation_context_tracker.cc \
+	base/trace_event/heap_profiler_allocation_register.cc \
+	base/trace_event/heap_profiler_allocation_register_posix.cc \
+	base/trace_event/heap_profiler_heap_dump_writer.cc \
 	base/trace_event/heap_profiler_stack_frame_deduplicator.cc \
 	base/trace_event/heap_profiler_type_name_deduplicator.cc \
+	base/trace_event/malloc_dump_provider.cc \
 	base/trace_event/memory_allocator_dump.cc \
 	base/trace_event/memory_allocator_dump_guid.cc \
 	base/trace_event/memory_dump_manager.cc \
-	base/trace_event/malloc_dump_provider.cc \
 	base/trace_event/memory_dump_request_args.cc \
 	base/trace_event/memory_dump_session_state.cc \
+	base/trace_event/memory_infra_background_whitelist.cc \
 	base/trace_event/process_memory_dump.cc \
 	base/trace_event/process_memory_maps.cc \
-	base/trace_event/process_memory_maps_dump_provider.cc \
 	base/trace_event/process_memory_totals.cc \
-	base/trace_event/process_memory_totals_dump_provider.cc \
 	base/trace_event/trace_buffer.cc \
 	base/trace_event/trace_config.cc \
 	base/trace_event/trace_event_argument.cc \
@@ -200,11 +211,14 @@
 	base/tracked_objects.cc \
 	base/tracking_info.cc \
 	base/values.cc \
+	base/version.cc \
 	base/vlog.cc \
 
 libchromeLinuxSrc := \
+	base/allocator/allocator_shim.cc \
 	base/files/file_path_watcher_linux.cc \
 	base/files/file_util_linux.cc \
+	base/memory/shared_memory_posix.cc \
 	base/posix/unix_domain_socket_linux.cc \
 	base/process/internal_linux.cc \
 	base/process/process_handle_linux.cc \
@@ -224,9 +238,12 @@
 	base/mac/bundle_locations.mm \
 	base/mac/foundation_util.mm \
 	base/mac/mach_logging.cc \
-	base/mac/libdispatch_task_runner.cc \
 	base/mac/scoped_mach_port.cc \
+	base/mac/scoped_mach_vm.cc \
 	base/mac/scoped_nsautorelease_pool.mm \
+	base/mac/sdk_forward_declarations.mm \
+	base/memory/shared_memory_mac.cc \
+	base/memory/shared_memory_handle_mac.cc \
 	base/message_loop/message_pump_mac.mm \
 	base/process/launch_mac.cc \
 	base/process/port_provider_mac.cc \
@@ -234,7 +251,7 @@
 	base/process/process_iterator_mac.cc \
 	base/process/process_metrics_mac.cc \
 	base/strings/sys_string_conversions_mac.mm \
-	base/sys_info_mac.cc \
+	base/sys_info_mac.mm \
 	base/time/time_mac.cc \
 	base/threading/platform_thread_mac.mm \
 
@@ -279,7 +296,6 @@
 	base/memory/linked_ptr_unittest.cc \
 	base/memory/ref_counted_memory_unittest.cc \
 	base/memory/ref_counted_unittest.cc \
-	base/memory/scoped_ptr_unittest.cc \
 	base/memory/scoped_vector_unittest.cc \
 	base/memory/singleton_unittest.cc \
 	base/memory/weak_ptr_unittest.cc \
@@ -293,12 +309,16 @@
 	base/metrics/histogram_macros_unittest.cc \
 	base/metrics/histogram_snapshot_manager_unittest.cc \
 	base/metrics/histogram_unittest.cc \
+	base/metrics/persistent_histogram_allocator_unittest.cc \
+	base/metrics/persistent_memory_allocator_unittest.cc \
+	base/metrics/persistent_sample_map_unittest.cc \
 	base/metrics/sample_map_unittest.cc \
 	base/metrics/sample_vector_unittest.cc \
 	base/metrics/sparse_histogram_unittest.cc \
 	base/metrics/statistics_recorder_unittest.cc \
 	base/numerics/safe_numerics_unittest.cc \
 	base/observer_list_unittest.cc \
+	base/optional_unittest.cc \
 	base/pickle_unittest.cc \
 	base/posix/file_descriptor_shuffle_unittest.cc \
 	base/posix/unix_domain_socket_linux_unittest.cc \
@@ -328,6 +348,10 @@
 	base/sys_info_unittest.cc \
 	base/task/cancelable_task_tracker_unittest.cc \
 	base/task_runner_util_unittest.cc \
+	base/task_scheduler/scheduler_lock_unittest.cc \
+	base/task_scheduler/sequence_sort_key_unittest.cc \
+	base/task_scheduler/sequence_unittest.cc \
+	base/task_scheduler/task_traits.cc \
 	base/template_util_unittest.cc \
 	base/test/multiprocess_test.cc \
 	base/test/multiprocess_test_android.cc \
@@ -364,8 +388,6 @@
 	base/trace_event/memory_allocator_dump_unittest.cc \
 	base/trace_event/memory_dump_manager_unittest.cc \
 	base/trace_event/process_memory_dump_unittest.cc \
-	base/trace_event/process_memory_maps_dump_provider_unittest.cc \
-	base/trace_event/process_memory_totals_dump_provider_unittest.cc \
 	base/trace_event/trace_config_unittest.cc \
 	base/trace_event/trace_event_argument_unittest.cc \
 	base/trace_event/trace_event_synthetic_delay_unittest.cc \
@@ -373,6 +395,7 @@
 	base/tracked_objects_unittest.cc \
 	base/tuple_unittest.cc \
 	base/values_unittest.cc \
+	base/version_unittest.cc \
 	base/vlog_unittest.cc \
 	testing/multiprocess_func_list.cc \
 	testrunner.cc \
@@ -381,10 +404,11 @@
 	crypto/secure_hash_unittest.cc \
 	crypto/sha2_unittest.cc \
 
-libchromeHostCFlags := -D__ANDROID_HOST__
+libchromeHostCFlags := -D__ANDROID_HOST__ -DDONT_EMBED_BUILD_METADATA
 
 ifeq ($(HOST_OS),linux)
-libchromeHostSrc := $(libchromeLinuxSrc)
+libchromeHostSrc := $(libchromeLinuxSrc) \
+	base/allocator/allocator_shim_default_dispatch_to_glibc.cc
 libchromeHostLdFlags :=
 endif
 
@@ -402,13 +426,23 @@
 # ========================================================
 include $(CLEAR_VARS)
 LOCAL_MODULE := libchrome
-LOCAL_SRC_FILES := $(libchromeCommonSrc) $(libchromeLinuxSrc) base/sys_info_chromeos.cc
+LOCAL_SRC_FILES := \
+	$(libchromeCommonSrc) \
+	$(libchromeLinuxSrc) \
+	base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc \
+	base/memory/shared_memory_android.cc \
+	base/sys_info_chromeos.cc \
+
 LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
 LOCAL_CFLAGS := $(libchromeCommonCFlags)
 LOCAL_CLANG := $(libchromeUseClang)
 LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
-LOCAL_SHARED_LIBRARIES := libevent liblog libcutils
-LOCAL_STATIC_LIBRARIES := libmodpb64
+LOCAL_LDFLAGS := -Wl,-wrap,calloc -Wl,-wrap,free -Wl,-wrap,malloc \
+	-Wl,-wrap,memalign -Wl,-wrap,realloc
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbase
+LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := libgtest_prod
+LOCAL_SHARED_LIBRARIES :=  libbase libevent liblog libcutils
+LOCAL_STATIC_LIBRARIES := libmodpb64 libgtest_prod
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
 include $(BUILD_SHARED_LIBRARY)
 
@@ -419,13 +453,18 @@
 LOCAL_SRC_FILES := \
 	$(libchromeCommonSrc) \
 	$(libchromeLinuxSrc) \
+	base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc \
+	base/memory/shared_memory_android.cc \
 	base/sys_info_chromeos.cc \
 
 LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
 LOCAL_CFLAGS := $(libchromeCommonCFlags)
 LOCAL_CLANG := $(libchromeUseClang)
 LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
-LOCAL_STATIC_LIBRARIES := libmodpb64 libbase libevent liblog libcutils
+LOCAL_LDFLAGS := -Wl,-wrap,calloc -Wl,-wrap,free -Wl,-wrap,malloc \
+	-Wl,-wrap,memalign -Wl,-wrap,realloc
+LOCAL_STATIC_LIBRARIES := libmodpb64 libgtest_prod \
+	libbase libevent liblog libcutils
 LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := $(LOCAL_STATIC_LIBRARIES)
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
 include $(BUILD_STATIC_LIBRARY)
@@ -439,9 +478,14 @@
 LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
 LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
-LOCAL_SHARED_LIBRARIES := libevent-host
-LOCAL_STATIC_LIBRARIES := libmodpb64-host
-LOCAL_SRC_FILES := $(libchromeCommonSrc) $(libchromeHostSrc)
+LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := libgtest_prod
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbase
+LOCAL_SHARED_LIBRARIES := libbase libevent
+LOCAL_STATIC_LIBRARIES := libmodpb64 libgtest_prod
+LOCAL_SRC_FILES := \
+	$(libchromeCommonSrc) \
+	$(libchromeHostSrc) \
+
 LOCAL_LDFLAGS := $(libchromeHostLdFlags)
 include $(BUILD_HOST_SHARED_LIBRARY)
 
@@ -475,8 +519,10 @@
 	libdbus \
 	libprotobuf-cpp-lite \
 
-LOCAL_STATIC_LIBRARIES :=
+LOCAL_STATIC_LIBRARIES := libgtest_prod
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
+LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := libgtest_prod
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libchrome
 include $(BUILD_SHARED_LIBRARY)
 
 endif  # local_use_dbus == 1
@@ -487,7 +533,8 @@
 LOCAL_MODULE := libchrome-crypto
 LOCAL_SRC_FILES := \
 	crypto/openssl_util.cc \
-	crypto/secure_hash_openssl.cc \
+	crypto/random.cc \
+	crypto/secure_hash.cc \
 	crypto/secure_util.cc \
 	crypto/sha2.cc \
 
@@ -501,7 +548,6 @@
 	libcrypto \
 	libssl \
 
-LOCAL_STATIC_LIBRARIES :=
 LOCAL_EXPORT_C_INCLUDE_DIRS := $(libchromeExportedCIncludes)
 include $(BUILD_SHARED_LIBRARY)
 
@@ -531,6 +577,7 @@
 include $(CLEAR_VARS)
 LOCAL_MODULE := libchrome_dbus_test_helpers
 LOCAL_SHARED_LIBRARIES := libdbus libchrome-dbus
+LOCAL_STATIC_LIBRARIES := libgmock
 LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
 LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeTestCFlags)
 LOCAL_CLANG := $(libchromeUseClang)
@@ -563,15 +610,12 @@
 # ========================================================
 include $(CLEAR_VARS)
 LOCAL_MODULE := libchrome_test
-ifdef BRILLO
-  LOCAL_MODULE_TAGS := debug
-endif
 LOCAL_SRC_FILES := $(libchromeCommonUnittestSrc)
 LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
 LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeTestCFlags) $(libchromeHostCFlags) -DUNIT_TEST
 LOCAL_CLANG := $(libchromeUseClang)
 LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
-LOCAL_SHARED_LIBRARIES := libchrome libevent-host
+LOCAL_SHARED_LIBRARIES := libchrome libevent
 LOCAL_STATIC_LIBRARIES := libgmock_host libgtest_host
 LOCAL_LDLIBS := -lrt
 include $(BUILD_HOST_NATIVE_TEST)
@@ -582,9 +626,6 @@
 # ========================================================
 include $(CLEAR_VARS)
 LOCAL_MODULE := libchrome_test
-ifdef BRILLO
-  LOCAL_MODULE_TAGS := eng
-endif
 LOCAL_SRC_FILES := $(libchromeCryptoUnittestSrc) $(libchromeCommonUnittestSrc)
 LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
 LOCAL_CFLAGS := $(libchromeCommonCFlags) $(libchromeTestCFlags) -DUNIT_TEST -DDONT_EMBED_BUILD_METADATA
diff --git a/SConstruct b/SConstruct
index 5e00390..72e022e 100644
--- a/SConstruct
+++ b/SConstruct
@@ -27,6 +27,8 @@
     'name' : 'core',
     'sources' : """
                 allocator/allocator_extension.cc
+                allocator/allocator_shim.cc
+                allocator/allocator_shim_default_dispatch_to_glibc.cc
                 at_exit.cc
                 base64.cc
                 base64url.cc
@@ -62,7 +64,7 @@
                 files/scoped_file.cc
                 files/scoped_temp_dir.cc
                 guid.cc
-                guid_posix.cc
+                hash.cc
                 json/json_file_value_serializer.cc
                 json/json_parser.cc
                 json/json_reader.cc
@@ -74,8 +76,10 @@
                 location.cc
                 logging.cc
                 md5.cc
+                memory/aligned_memory.cc
                 memory/ref_counted.cc
                 memory/ref_counted_memory.cc
+                memory/shared_memory_posix.cc
                 memory/singleton.cc
                 memory/weak_ptr.cc
                 message_loop/incoming_task_queue.cc
@@ -92,6 +96,9 @@
                 metrics/histogram.cc
                 metrics/histogram_samples.cc
                 metrics/histogram_snapshot_manager.cc
+                metrics/persistent_histogram_allocator.cc
+                metrics/persistent_memory_allocator.cc
+                metrics/persistent_sample_map.cc
                 metrics/sample_map.cc
                 metrics/sample_vector.cc
                 metrics/sparse_histogram.cc
@@ -115,7 +122,6 @@
                 process/process_metrics_linux.cc
                 process/process_metrics_posix.cc
                 process/process_posix.cc
-                profiler/alternate_timer.cc
                 profiler/scoped_profile.cc
                 profiler/scoped_tracker.cc
                 profiler/tracked_time.cc
@@ -141,6 +147,7 @@
                 synchronization/condition_variable_posix.cc
                 synchronization/lock.cc
                 synchronization/lock_impl_posix.cc
+                synchronization/read_write_lock_posix.cc
                 synchronization/waitable_event_posix.cc
                 synchronization/waitable_event_watcher_posix.cc
                 sync_socket_posix.cc
@@ -150,6 +157,11 @@
                 sys_info_posix.cc
                 task_runner.cc
                 task/cancelable_task_tracker.cc
+                task_scheduler/scheduler_lock_impl.cc
+                task_scheduler/sequence.cc
+                task_scheduler/sequence_sort_key.cc
+                task_scheduler/task.cc
+                task_scheduler/task_traits.cc
                 third_party/icu/icu_utf.cc
                 third_party/nspr/prtime.cc
                 threading/non_thread_safe_impl.cc
@@ -157,6 +169,7 @@
                 threading/platform_thread_linux.cc
                 threading/platform_thread_posix.cc
                 threading/post_task_and_reply_impl.cc
+                threading/sequenced_task_runner_handle.cc
                 threading/sequenced_worker_pool.cc
                 threading/simple_thread.cc
                 threading/thread.cc
@@ -167,9 +180,9 @@
                 threading/thread_local_storage.cc
                 threading/thread_local_storage_posix.cc
                 threading/thread_restrictions.cc
+                threading/thread_task_runner_handle.cc
                 threading/worker_pool.cc
                 threading/worker_pool_posix.cc
-                thread_task_runner_handle.cc
                 timer/elapsed_timer.cc
                 timer/timer.cc
                 time/clock.cc
@@ -178,21 +191,23 @@
                 time/tick_clock.cc
                 time/time.cc
                 time/time_posix.cc
-                trace_event/malloc_dump_provider.cc
                 trace_event/heap_profiler_allocation_context.cc
                 trace_event/heap_profiler_allocation_context_tracker.cc
+                trace_event/heap_profiler_allocation_register.cc
+                trace_event/heap_profiler_allocation_register_posix.cc
+                trace_event/heap_profiler_heap_dump_writer.cc
                 trace_event/heap_profiler_stack_frame_deduplicator.cc
                 trace_event/heap_profiler_type_name_deduplicator.cc
+                trace_event/malloc_dump_provider.cc
                 trace_event/memory_allocator_dump.cc
                 trace_event/memory_allocator_dump_guid.cc
                 trace_event/memory_dump_manager.cc
                 trace_event/memory_dump_request_args.cc
                 trace_event/memory_dump_session_state.cc
+                trace_event/memory_infra_background_whitelist.cc
                 trace_event/process_memory_dump.cc
                 trace_event/process_memory_maps.cc
-                trace_event/process_memory_maps_dump_provider.cc
                 trace_event/process_memory_totals.cc
-                trace_event/process_memory_totals_dump_provider.cc
                 trace_event/trace_buffer.cc
                 trace_event/trace_config.cc
                 trace_event/trace_event_argument.cc
@@ -205,6 +220,7 @@
                 tracked_objects.cc
                 tracking_info.cc
                 values.cc
+                version.cc
                 vlog.cc
                 """,
     'prefix' : 'base',
@@ -257,13 +273,14 @@
                 hmac_nss.cc
                 nss_key_util.cc
                 nss_util.cc
+                openssl_util.cc
                 p224.cc
                 p224_spake.cc
                 random.cc
                 rsa_private_key.cc
                 rsa_private_key_nss.cc
                 scoped_test_nss_db.cc
-                secure_hash_default.cc
+                secure_hash.cc
                 secure_util.cc
                 sha2.cc
                 signature_creator_nss.cc
@@ -274,7 +291,7 @@
                 """,
     'prefix' : 'crypto',
     'libs' : '%s-dl-%s' % (base_name, BASE_VER),
-    'pc_libs' : 'nss',
+    'pc_libs' : 'nss openssl',
   },
   {
     'name' : 'sandbox',
@@ -334,6 +351,7 @@
 env['CCFLAGS'] += ['-DOS_CHROMEOS',
                    '-DUSE_NSS_CERTS',
                    '-DUSE_SYSTEM_LIBEVENT',
+                   '-DNO_TCMALLOC',
                    '-fPIC',
                    '-fno-exceptions',
                    '-Wall',
diff --git a/base/BUILD.gn b/base/BUILD.gn
index 5d8510f..c147989 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -18,16 +18,30 @@
 # huge sequence of random-looking conditionals.
 
 import("//build/buildflag_header.gni")
+import("//build/config/allocator.gni")
+import("//build/config/chromecast_build.gni")
 import("//build/config/compiler/compiler.gni")
 import("//build/config/nacl/config.gni")
+import("//build/config/sysroot.gni")
 import("//build/config/ui.gni")
 import("//build/nocompile.gni")
 import("//testing/test.gni")
 
+declare_args() {
+  # Override this value to give a specific build date.
+  # See //base/build_time.cc and //build/write_build_date_header.py for more
+  # details and the expected format.
+  override_build_date = "N/A"
+}
+
 if (is_android) {
   import("//build/config/android/rules.gni")
 }
 
+if (is_win) {
+  import("//build/config/win/visual_studio_version.gni")
+}
+
 config("base_flags") {
   if (is_clang) {
     cflags = [
@@ -40,6 +54,7 @@
 
 config("base_implementation") {
   defines = [ "BASE_IMPLEMENTATION" ]
+  configs = [ "//build/config/compiler:wexit_time_destructors" ]
 }
 
 if (is_win) {
@@ -93,8 +108,10 @@
   }
 }
 
-config("android_system_libs") {
-  libs = [ "log" ]  # Used by logging.cc.
+if (is_android) {
+  config("android_system_libs") {
+    libs = [ "log" ]  # Used by logging.cc.
+  }
 }
 
 # Base and everything it depends on should be a static library rather than
@@ -110,12 +127,14 @@
 # test code (test support and anything in the test directory) which should use
 # source_set as is recommended for GN targets).
 component("base") {
-  # TODO(phosek) bug 570839: If field_trial.cc is in a static library,
-  # hacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
-  # reasons for this seem to involve obscure toolchain bugs. This should be
-  # fixed and this target should always be a static_library in the
-  # non-component case.
-  component_never_use_source_set = !is_nacl_nonsfi
+  if (is_nacl_nonsfi) {
+    # TODO(phosek) bug 570839: If field_trial.cc is in a static library,
+    # nacl_helper_nonsfi doesn't link properly on Linux in debug builds. The
+    # reasons for this seem to involve obscure toolchain bugs. This should be
+    # fixed and this target should always be a static_library in the
+    # non-component case.
+    static_component_type = "source_set"
+  }
 
   sources = [
     "allocator/allocator_check.cc",
@@ -134,6 +153,8 @@
     "android/base_jni_registrar.h",
     "android/build_info.cc",
     "android/build_info.h",
+    "android/callback_android.cc",
+    "android/callback_android.h",
     "android/command_line_android.cc",
     "android/command_line_android.h",
     "android/content_uri_utils.cc",
@@ -212,7 +233,6 @@
     "bind_helpers.cc",
     "bind_helpers.h",
     "bind_internal.h",
-    "bind_internal_win.h",
     "bit_cast.h",
     "bits.h",
     "build_time.cc",
@@ -331,8 +351,6 @@
     "gtest_prod_util.h",
     "guid.cc",
     "guid.h",
-    "guid_posix.cc",
-    "guid_win.cc",
     "hash.cc",
     "hash.h",
     "id_map.h",
@@ -380,6 +398,7 @@
     "mac/call_with_eh_frame.cc",
     "mac/call_with_eh_frame.h",
     "mac/call_with_eh_frame_asm.S",
+    "mac/close_nocancel.cc",
     "mac/cocoa_protocols.h",
     "mac/dispatch_source_mach.cc",
     "mac/dispatch_source_mach.h",
@@ -389,14 +408,16 @@
     "mac/launch_services_util.h",
     "mac/launchd.cc",
     "mac/launchd.h",
-    "mac/libdispatch_task_runner.cc",
-    "mac/libdispatch_task_runner.h",
-    "mac/mac_logging.cc",
     "mac/mac_logging.h",
+    "mac/mac_logging.mm",
     "mac/mac_util.h",
     "mac/mac_util.mm",
     "mac/mach_logging.cc",
     "mac/mach_logging.h",
+    "mac/mach_port_broker.h",
+    "mac/mach_port_broker.mm",
+    "mac/mach_port_util.cc",
+    "mac/mach_port_util.h",
     "mac/objc_property_releaser.h",
     "mac/objc_property_releaser.mm",
     "mac/os_crash_dumps.cc",
@@ -405,6 +426,7 @@
     "mac/scoped_authorizationref.h",
     "mac/scoped_block.h",
     "mac/scoped_cftyperef.h",
+    "mac/scoped_dispatch_object.h",
     "mac/scoped_ioobject.h",
     "mac/scoped_ioplugininterface.h",
     "mac/scoped_launch_data.h",
@@ -415,6 +437,7 @@
     "mac/scoped_nsautorelease_pool.h",
     "mac/scoped_nsautorelease_pool.mm",
     "mac/scoped_nsobject.h",
+    "mac/scoped_nsobject.mm",
     "mac/scoped_objc_class_swizzler.h",
     "mac/scoped_objc_class_swizzler.mm",
     "mac/scoped_sending_event.h",
@@ -432,6 +455,7 @@
     "memory/discardable_memory_allocator.h",
     "memory/discardable_shared_memory.cc",
     "memory/discardable_shared_memory.h",
+    "memory/free_deleter.h",
     "memory/linked_ptr.h",
     "memory/manual_constructor.h",
     "memory/memory_pressure_listener.cc",
@@ -452,7 +476,6 @@
     "memory/ref_counted_memory.cc",
     "memory/ref_counted_memory.h",
     "memory/scoped_policy.h",
-    "memory/scoped_ptr.h",
     "memory/scoped_vector.h",
     "memory/shared_memory.h",
     "memory/shared_memory_android.cc",
@@ -507,6 +530,12 @@
     "metrics/histogram_snapshot_manager.h",
     "metrics/metrics_hashes.cc",
     "metrics/metrics_hashes.h",
+    "metrics/persistent_histogram_allocator.cc",
+    "metrics/persistent_histogram_allocator.h",
+    "metrics/persistent_memory_allocator.cc",
+    "metrics/persistent_memory_allocator.h",
+    "metrics/persistent_sample_map.cc",
+    "metrics/persistent_sample_map.h",
     "metrics/sample_map.cc",
     "metrics/sample_map.h",
     "metrics/sample_vector.cc",
@@ -518,7 +547,6 @@
     "metrics/user_metrics.cc",
     "metrics/user_metrics.h",
     "metrics/user_metrics_action.h",
-    "move.h",
     "native_library.h",
     "native_library_ios.mm",
     "native_library_mac.mm",
@@ -534,6 +562,7 @@
     "numerics/safe_math_impl.h",
     "observer_list.h",
     "observer_list_threadsafe.h",
+    "optional.h",
     "os_compat_android.cc",
     "os_compat_android.h",
     "os_compat_nacl.cc",
@@ -623,8 +652,6 @@
     "process/process_metrics_win.cc",
     "process/process_posix.cc",
     "process/process_win.cc",
-    "profiler/alternate_timer.cc",
-    "profiler/alternate_timer.h",
     "profiler/native_stack_sampler.cc",
     "profiler/native_stack_sampler.h",
     "profiler/native_stack_sampler_posix.cc",
@@ -654,9 +681,8 @@
     "sequenced_task_runner.cc",
     "sequenced_task_runner.h",
     "sequenced_task_runner_helpers.h",
+    "sha1.cc",
     "sha1.h",
-    "sha1_portable.cc",
-    "sha1_win.cc",
     "single_thread_task_runner.h",
     "stl_util.h",
     "strings/latin1_string_conversions.cc",
@@ -709,6 +735,10 @@
     "synchronization/lock_impl.h",
     "synchronization/lock_impl_posix.cc",
     "synchronization/lock_impl_win.cc",
+    "synchronization/read_write_lock.h",
+    "synchronization/read_write_lock_nacl.cc",
+    "synchronization/read_write_lock_posix.cc",
+    "synchronization/read_write_lock_win.cc",
     "synchronization/spin_wait.h",
     "synchronization/waitable_event.h",
     "synchronization/waitable_event_posix.cc",
@@ -725,7 +755,7 @@
     #"sys_info_freebsd.cc",  # Unused in Chromium build.
     "sys_info_ios.mm",
     "sys_info_linux.cc",
-    "sys_info_mac.cc",
+    "sys_info_mac.mm",
 
     #"sys_info_openbsd.cc",  # Unused in Chromium build.
     "sys_info_posix.cc",
@@ -737,6 +767,36 @@
     "task_runner.cc",
     "task_runner.h",
     "task_runner_util.h",
+    "task_scheduler/delayed_task_manager.cc",
+    "task_scheduler/delayed_task_manager.h",
+    "task_scheduler/priority_queue.cc",
+    "task_scheduler/priority_queue.h",
+    "task_scheduler/scheduler_lock.h",
+    "task_scheduler/scheduler_lock_impl.cc",
+    "task_scheduler/scheduler_lock_impl.h",
+    "task_scheduler/scheduler_service_thread.cc",
+    "task_scheduler/scheduler_service_thread.h",
+    "task_scheduler/scheduler_worker.cc",
+    "task_scheduler/scheduler_worker.h",
+    "task_scheduler/scheduler_worker_pool.h",
+    "task_scheduler/scheduler_worker_pool_impl.cc",
+    "task_scheduler/scheduler_worker_pool_impl.h",
+    "task_scheduler/scheduler_worker_stack.cc",
+    "task_scheduler/scheduler_worker_stack.h",
+    "task_scheduler/sequence.cc",
+    "task_scheduler/sequence.h",
+    "task_scheduler/sequence_sort_key.cc",
+    "task_scheduler/sequence_sort_key.h",
+    "task_scheduler/task.cc",
+    "task_scheduler/task.h",
+    "task_scheduler/task_scheduler.cc",
+    "task_scheduler/task_scheduler.h",
+    "task_scheduler/task_scheduler_impl.cc",
+    "task_scheduler/task_scheduler_impl.h",
+    "task_scheduler/task_tracker.cc",
+    "task_scheduler/task_tracker.h",
+    "task_scheduler/task_traits.cc",
+    "task_scheduler/task_traits.h",
     "template_util.h",
     "third_party/dmg_fp/dmg_fp.h",
     "third_party/dmg_fp/dtoa_wrapper.cc",
@@ -746,8 +806,6 @@
     "third_party/nspr/prtime.cc",
     "third_party/nspr/prtime.h",
     "third_party/superfasthash/superfasthash.c",
-    "thread_task_runner_handle.cc",
-    "thread_task_runner_handle.h",
     "threading/non_thread_safe.h",
     "threading/non_thread_safe_impl.cc",
     "threading/non_thread_safe_impl.h",
@@ -786,6 +844,8 @@
     "threading/thread_local_win.cc",
     "threading/thread_restrictions.cc",
     "threading/thread_restrictions.h",
+    "threading/thread_task_runner_handle.cc",
+    "threading/thread_task_runner_handle.h",
     "threading/watchdog.cc",
     "threading/watchdog.h",
     "threading/worker_pool.cc",
@@ -815,7 +875,10 @@
     "timer/mock_timer.h",
     "timer/timer.cc",
     "timer/timer.h",
+    "trace_event/blame_context.cc",
+    "trace_event/blame_context.h",
     "trace_event/common/trace_event_common.h",
+    "trace_event/heap_profiler.h",
     "trace_event/heap_profiler_allocation_context.cc",
     "trace_event/heap_profiler_allocation_context.h",
     "trace_event/heap_profiler_allocation_context_tracker.cc",
@@ -843,15 +906,14 @@
     "trace_event/memory_dump_request_args.h",
     "trace_event/memory_dump_session_state.cc",
     "trace_event/memory_dump_session_state.h",
+    "trace_event/memory_infra_background_whitelist.cc",
+    "trace_event/memory_infra_background_whitelist.h",
     "trace_event/process_memory_dump.cc",
     "trace_event/process_memory_dump.h",
     "trace_event/process_memory_maps.cc",
     "trace_event/process_memory_maps.h",
-    "trace_event/process_memory_maps_dump_provider.h",
     "trace_event/process_memory_totals.cc",
     "trace_event/process_memory_totals.h",
-    "trace_event/process_memory_totals_dump_provider.cc",
-    "trace_event/process_memory_totals_dump_provider.h",
     "trace_event/trace_buffer.cc",
     "trace_event/trace_buffer.h",
     "trace_event/trace_config.cc",
@@ -935,6 +997,8 @@
     "win/shortcut.h",
     "win/startup_information.cc",
     "win/startup_information.h",
+    "win/wait_chain.cc",
+    "win/wait_chain.h",
     "win/win_util.cc",
     "win/win_util.h",
     "win/windows_version.cc",
@@ -954,6 +1018,8 @@
   ]
 
   deps = [
+    "//base/allocator",
+    "//base/allocator:features",
     "//base/third_party/dynamic_annotations",
     "//third_party/modp_b64",
   ]
@@ -961,12 +1027,24 @@
   public_deps = [
     ":base_paths",
     ":base_static",
+    ":build_date",
     ":debugging_flags",
   ]
 
+  # Needed for <atomic> if using newer C++ library than sysroot
+  if (!use_sysroot && (is_android || is_linux)) {
+    libs = [ "atomic" ]
+  }
+
+  if (use_experimental_allocator_shim) {
+    # The allocator shim is part of the base API. This is to allow clients of
+    # base to install hooks into the allocator path.
+    public_deps += [ "//base/allocator:unified_allocator_shim" ]
+  }
+
   # Allow more direct string conversions on platforms with native utf8
   # strings
-  if (is_mac || is_ios || is_chromeos) {
+  if (is_mac || is_ios || is_chromeos || is_chromecast) {
     defines += [ "SYSTEM_NATIVE_UTF8" ]
   }
 
@@ -991,7 +1069,6 @@
       "sys_info_linux.cc",
       "trace_event/malloc_dump_provider.cc",
       "trace_event/malloc_dump_provider.h",
-      "trace_event/process_memory_maps_dump_provider.cc",
     ]
     set_sources_assignment_filter(sources_assignment_filter)
 
@@ -1056,9 +1133,9 @@
       "process/process_posix.cc",
       "scoped_native_library.cc",
       "sync_socket_posix.cc",
+      "synchronization/read_write_lock_posix.cc",
       "sys_info.cc",
       "sys_info_posix.cc",
-      "trace_event/process_memory_totals_dump_provider.cc",
       "trace_event/trace_event_system_stats_monitor.cc",
     ]
 
@@ -1091,6 +1168,7 @@
       "os_compat_nacl.cc",
       "os_compat_nacl.h",
       "rand_util_nacl.cc",
+      "synchronization/read_write_lock_nacl.cc",
     ]
   }
 
@@ -1104,16 +1182,12 @@
     sources -= [
       "message_loop/message_pump_libevent.cc",
       "strings/string16.cc",
-
-      # Not using sha1_win.cc because it may have caused a
-      # regression to page cycler moz.
-      "sha1_win.cc",
     ]
 
-    # Required for base/stack_trace_win.cc to symbolize correctly.
-    data += [ "$root_build_dir/dbghelp.dll" ]
-
-    deps += [ "//base/trace_event/etw_manifest:chrome_events_win" ]
+    deps += [
+      "//base/trace_event/etw_manifest:chrome_events_win",
+      "//base/win:base_win_features",
+    ]
 
     if (is_component_build) {
       # Copy the VS runtime DLLs into the isolate so that they don't have to be
@@ -1127,12 +1201,63 @@
 
       # These runtime files are copied to the output directory by the
       # vs_toolchain script that runs as part of toolchain configuration.
-      data += [
-        "$root_out_dir/msvcp120${vcrt_suffix}.dll",
-        "$root_out_dir/msvcr120${vcrt_suffix}.dll",
-      ]
+      if (visual_studio_version == "2015") {
+        data += [
+          "$root_out_dir/msvcp140${vcrt_suffix}.dll",
+          "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
+          "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
+
+          # Universal Windows 10 CRT files
+          "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
+          "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
+          "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
+          "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
+        ]
+      } else {
+        data += [
+          "$root_out_dir/msvcp120${vcrt_suffix}.dll",
+          "$root_out_dir/msvcr120${vcrt_suffix}.dll",
+        ]
+      }
       if (is_asan) {
-        data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/3.8.0/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
+        data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
       }
     }
 
@@ -1141,9 +1266,10 @@
 
     libs = [
       "cfgmgr32.lib",
-      "netapi32.lib",
       "powrprof.lib",
       "setupapi.lib",
+      "userenv.lib",
+      "winmm.lib",
     ]
     all_dependent_configs = [ ":base_win_linker_flags" ]
   } else if (!is_nacl || is_nacl_nonsfi) {
@@ -1157,6 +1283,14 @@
       "trace_event/malloc_dump_provider.cc",
       "trace_event/malloc_dump_provider.h",
     ]
+    libs = [
+      "ApplicationServices.framework",
+      "AppKit.framework",
+      "bsm",
+      "CoreFoundation.framework",
+      "IOKit.framework",
+      "Security.framework",
+    ]
   }
 
   # Mac or iOS.
@@ -1167,11 +1301,6 @@
       "strings/sys_string_conversions_posix.cc",
       "threading/platform_thread_internal_posix.cc",
     ]
-
-    if (is_asan) {
-      # TODO(GYP) hook up asan on Mac. GYP has this extra dylib:
-      #data += [ "$root_out_dir/libclang_rt.asan_osx_dynamic.dylib" ]
-    }
   } else {
     # Non-Mac/ios.
     sources -= [
@@ -1187,7 +1316,6 @@
     sources += [
       "trace_event/malloc_dump_provider.cc",
       "trace_event/malloc_dump_provider.h",
-      "trace_event/process_memory_maps_dump_provider.cc",
     ]
 
     if (is_asan || is_lsan || is_msan || is_tsan) {
@@ -1272,12 +1400,13 @@
       "mac/call_with_eh_frame.h",
       "mac/foundation_util.h",
       "mac/foundation_util.mm",
-      "mac/mac_logging.cc",
       "mac/mac_logging.h",
+      "mac/mac_logging.mm",
       "mac/mach_logging.cc",
       "mac/mach_logging.h",
       "mac/objc_property_releaser.h",
       "mac/objc_property_releaser.mm",
+      "mac/scoped_block.h",
       "mac/scoped_mach_port.cc",
       "mac/scoped_mach_port.h",
       "mac/scoped_mach_vm.cc",
@@ -1285,8 +1414,11 @@
       "mac/scoped_nsautorelease_pool.h",
       "mac/scoped_nsautorelease_pool.mm",
       "mac/scoped_nsobject.h",
+      "mac/scoped_nsobject.mm",
       "mac/scoped_objc_class_swizzler.h",
       "mac/scoped_objc_class_swizzler.mm",
+      "mac/scoped_typeref.h",
+      "memory/shared_memory_posix.cc",
       "message_loop/message_pump_mac.h",
       "message_loop/message_pump_mac.mm",
       "process/memory_stubs.cc",
@@ -1407,146 +1539,70 @@
 
   # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
   configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
-}
 
-if (is_ios || is_android || is_win || (is_linux && !is_chromeos)) {
-  # TODO(GYP): Figure out which of these work and are needed on other platforms.
-  test("base_perftests") {
-    sources = [
-      "message_loop/message_pump_perftest.cc",
-
-      # "test/run_all_unittests.cc",
-      "threading/thread_perftest.cc",
-    ]
-    deps = [
-      ":base",
-      "//base/test:test_support",
-      "//base/test:test_support_perf",
-      "//testing/gtest",
-      "//testing/perf",
-    ]
-
-    if (is_android) {
-      deps += [ "//testing/android/native_test:native_test_native_code" ]
-    }
-  }
-
-  test("base_i18n_perftests") {
-    sources = [
-      "i18n/streaming_utf8_validator_perftest.cc",
-    ]
-    deps = [
-      ":base",
-      ":i18n",
-      "//base/test:test_support",
-      "//base/test:test_support_perf",
-      "//testing/gtest",
-    ]
-  }
-
-  if (!is_ios) {
-    executable("build_utf8_validator_tables") {
-      sources = [
-        "i18n/build_utf8_validator_tables.cc",
-      ]
-      deps = [
-        ":base",
-        "//build/config/sanitizers:deps",
-        "//third_party/icu:icuuc",
-      ]
-    }
-
-    executable("check_example") {
-      sources = [
-        "check_example.cc",
-      ]
-      deps = [
-        ":base",
-        "//build/config/sanitizers:deps",
-      ]
-    }
+  if (is_mac) {
+    libs = [ "CoreFoundation.framework" ]
   }
 }
 
-component("prefs") {
+test("base_perftests") {
   sources = [
-    "prefs/default_pref_store.cc",
-    "prefs/default_pref_store.h",
-    "prefs/json_pref_store.cc",
-    "prefs/json_pref_store.h",
-    "prefs/overlay_user_pref_store.cc",
-    "prefs/overlay_user_pref_store.h",
-    "prefs/pref_change_registrar.cc",
-    "prefs/pref_change_registrar.h",
-    "prefs/pref_member.cc",
-    "prefs/pref_member.h",
-    "prefs/pref_notifier_impl.cc",
-    "prefs/pref_notifier_impl.h",
-    "prefs/pref_registry.cc",
-    "prefs/pref_registry.h",
-    "prefs/pref_registry_simple.cc",
-    "prefs/pref_registry_simple.h",
-    "prefs/pref_service.cc",
-    "prefs/pref_service.h",
-    "prefs/pref_service_factory.cc",
-    "prefs/pref_service_factory.h",
-    "prefs/pref_store.cc",
-    "prefs/pref_store.h",
-    "prefs/pref_value_map.cc",
-    "prefs/pref_value_map.h",
-    "prefs/pref_value_store.cc",
-    "prefs/pref_value_store.h",
-    "prefs/scoped_user_pref_update.cc",
-    "prefs/scoped_user_pref_update.h",
-    "prefs/value_map_pref_store.cc",
-    "prefs/value_map_pref_store.h",
-  ]
-  if (!is_ios) {
-    sources += [
-      "prefs/base_prefs_export.h",
-      "prefs/persistent_pref_store.h",
-      "prefs/pref_filter.h",
-      "prefs/pref_notifier.h",
-      "prefs/pref_observer.h",
-      "prefs/writeable_pref_store.h",
-    ]
-  }
+    "message_loop/message_pump_perftest.cc",
 
-  defines = [ "BASE_PREFS_IMPLEMENTATION" ]
-
-  deps = [
-    ":base",
-  ]
-
-  if (!is_debug) {
-    configs -= [ "//build/config/compiler:default_optimization" ]
-    configs += [ "//build/config/compiler:optimize_max" ]
-  }
-}
-
-source_set("prefs_test_support") {
-  testonly = true
-  sources = [
-    "prefs/mock_pref_change_callback.cc",
-    "prefs/mock_pref_change_callback.h",
-    "prefs/pref_store_observer_mock.cc",
-    "prefs/pref_store_observer_mock.h",
-    "prefs/testing_pref_service.cc",
-    "prefs/testing_pref_service.h",
-    "prefs/testing_pref_store.cc",
-    "prefs/testing_pref_store.h",
-  ]
-
-  public_deps = [
-    ":prefs",
+    # "test/run_all_unittests.cc",
+    "threading/thread_perftest.cc",
   ]
   deps = [
     ":base",
-    "//testing/gmock",
+    "//base/test:test_support",
+    "//base/test:test_support_perf",
+    "//testing/gtest",
+    "//testing/perf",
+  ]
+
+  if (is_android) {
+    deps += [ "//testing/android/native_test:native_test_native_code" ]
+  }
+}
+
+test("base_i18n_perftests") {
+  sources = [
+    "i18n/streaming_utf8_validator_perftest.cc",
+  ]
+  deps = [
+    ":base",
+    ":i18n",
+    "//base/test:test_support",
+    "//base/test:test_support_perf",
     "//testing/gtest",
   ]
 }
 
+if (!is_ios) {
+  executable("build_utf8_validator_tables") {
+    sources = [
+      "i18n/build_utf8_validator_tables.cc",
+    ]
+    deps = [
+      ":base",
+      "//build/config/sanitizers:deps",
+      "//build/win:default_exe_manifest",
+      "//third_party/icu:icuuc",
+    ]
+  }
+
+  executable("check_example") {
+    sources = [
+      "check_example.cc",
+    ]
+    deps = [
+      ":base",
+      "//build/config/sanitizers:deps",
+      "//build/win:default_exe_manifest",
+    ]
+  }
+}
+
 source_set("message_loop_tests") {
   testonly = true
   sources = [
@@ -1581,7 +1637,17 @@
     ]
   }
 
-  if (target_cpu == "x64") {
+  loadable_module("scoped_handle_test_dll") {
+    sources = [
+      "win/scoped_handle_test_dll.cc",
+    ]
+    deps = [
+      ":base",
+      "//base/win:base_win_features",
+    ]
+  }
+
+  if (current_cpu == "x64") {
     # Must be a shared library so that it can be unloaded during testing.
     shared_library("base_profiler_test_support_library") {
       sources = [
@@ -1594,13 +1660,52 @@
   }
 }
 
-# TODO(GYP): Delete this after we've converted everything to GN.
-# The _run targets exist only for compatibility w/ GYP.
-group("base_unittests_run") {
+bundle_data("base_unittests_bundle_data") {
   testonly = true
-  deps = [
-    ":base_unittests",
+  sources = [
+    "test/data/file_util/binary_file.bin",
+    "test/data/file_util/binary_file_diff.bin",
+    "test/data/file_util/binary_file_same.bin",
+    "test/data/file_util/blank_line.txt",
+    "test/data/file_util/blank_line_crlf.txt",
+    "test/data/file_util/crlf.txt",
+    "test/data/file_util/different.txt",
+    "test/data/file_util/different_first.txt",
+    "test/data/file_util/different_last.txt",
+    "test/data/file_util/empty1.txt",
+    "test/data/file_util/empty2.txt",
+    "test/data/file_util/first1.txt",
+    "test/data/file_util/first2.txt",
+    "test/data/file_util/original.txt",
+    "test/data/file_util/same.txt",
+    "test/data/file_util/same_length.txt",
+    "test/data/file_util/shortened.txt",
+    "test/data/json/bom_feff.json",
+    "test/data/serializer_nested_test.json",
+    "test/data/serializer_test.json",
+    "test/data/serializer_test_nowhitespace.json",
   ]
+  outputs = [
+    "{{bundle_resources_dir}}/" +
+        "{{source_root_relative_dir}}/{{source_file_part}}",
+  ]
+}
+
+if (is_ios || is_mac) {
+  source_set("base_unittests_arc") {
+    testonly = true
+    set_sources_assignment_filter([])
+    sources = [
+      "mac/bind_objc_block_unittest_arc.mm",
+      "mac/scoped_nsobject_unittest_arc.mm",
+    ]
+    set_sources_assignment_filter(sources_assignment_filter)
+    configs += [ "//build/config/compiler:enable_arc" ]
+    deps = [
+      ":base",
+      "//testing/gtest",
+    ]
+  }
 }
 
 test("base_unittests") {
@@ -1622,6 +1727,7 @@
     "base64url_unittest.cc",
     "big_endian_unittest.cc",
     "bind_unittest.cc",
+    "bit_cast_unittest.cc",
     "bits_unittest.cc",
     "build_time_unittest.cc",
     "callback_helpers_unittest.cc",
@@ -1645,7 +1751,8 @@
     "debug/task_annotator_unittest.cc",
     "deferred_sequenced_task_runner_unittest.cc",
     "environment_unittest.cc",
-    "file_version_info_unittest.cc",
+    "feature_list_unittest.cc",
+    "file_version_info_win_unittest.cc",
     "files/dir_reader_posix_unittest.cc",
     "files/file_locking_unittest.cc",
     "files/file_path_unittest.cc",
@@ -1687,8 +1794,8 @@
     "mac/call_with_eh_frame_unittest.mm",
     "mac/dispatch_source_mach_unittest.cc",
     "mac/foundation_util_unittest.mm",
-    "mac/libdispatch_task_runner_unittest.cc",
     "mac/mac_util_unittest.mm",
+    "mac/mach_port_broker_unittest.cc",
     "mac/objc_property_releaser_unittest.mm",
     "mac/scoped_nsobject_unittest.mm",
     "mac/scoped_objc_class_swizzler_unittest.mm",
@@ -1697,15 +1804,17 @@
     "memory/aligned_memory_unittest.cc",
     "memory/discardable_shared_memory_unittest.cc",
     "memory/linked_ptr_unittest.cc",
+    "memory/memory_pressure_listener_unittest.cc",
     "memory/memory_pressure_monitor_chromeos_unittest.cc",
+    "memory/memory_pressure_monitor_mac_unittest.cc",
     "memory/memory_pressure_monitor_win_unittest.cc",
     "memory/ptr_util_unittest.cc",
     "memory/ref_counted_memory_unittest.cc",
     "memory/ref_counted_unittest.cc",
-    "memory/scoped_ptr_unittest.cc",
     "memory/scoped_vector_unittest.cc",
     "memory/shared_memory_mac_unittest.cc",
     "memory/shared_memory_unittest.cc",
+    "memory/shared_memory_win_unittest.cc",
     "memory/singleton_unittest.cc",
     "memory/weak_ptr_unittest.cc",
     "message_loop/message_loop_task_runner_unittest.cc",
@@ -1720,6 +1829,9 @@
     "metrics/histogram_snapshot_manager_unittest.cc",
     "metrics/histogram_unittest.cc",
     "metrics/metrics_hashes_unittest.cc",
+    "metrics/persistent_histogram_allocator_unittest.cc",
+    "metrics/persistent_memory_allocator_unittest.cc",
+    "metrics/persistent_sample_map_unittest.cc",
     "metrics/sample_map_unittest.cc",
     "metrics/sample_vector_unittest.cc",
     "metrics/sparse_histogram_unittest.cc",
@@ -1727,22 +1839,13 @@
     "native_library_unittest.cc",
     "numerics/safe_numerics_unittest.cc",
     "observer_list_unittest.cc",
+    "optional_unittest.cc",
     "os_compat_android_unittest.cc",
     "path_service_unittest.cc",
     "pickle_unittest.cc",
     "posix/file_descriptor_shuffle_unittest.cc",
     "posix/unix_domain_socket_linux_unittest.cc",
     "power_monitor/power_monitor_unittest.cc",
-    "prefs/default_pref_store_unittest.cc",
-    "prefs/json_pref_store_unittest.cc",
-    "prefs/overlay_user_pref_store_unittest.cc",
-    "prefs/pref_change_registrar_unittest.cc",
-    "prefs/pref_member_unittest.cc",
-    "prefs/pref_notifier_impl_unittest.cc",
-    "prefs/pref_service_unittest.cc",
-    "prefs/pref_value_map_unittest.cc",
-    "prefs/pref_value_store_unittest.cc",
-    "prefs/scoped_user_pref_update_unittest.cc",
     "process/memory_unittest.cc",
     "process/memory_unittest_mac.h",
     "process/memory_unittest_mac.mm",
@@ -1753,6 +1856,7 @@
     "profiler/stack_sampling_profiler_unittest.cc",
     "profiler/tracked_time_unittest.cc",
     "rand_util_unittest.cc",
+    "run_loop_unittest.cc",
     "scoped_clear_errno_unittest.cc",
     "scoped_generic_unittest.cc",
     "scoped_native_library_unittest.cc",
@@ -1780,12 +1884,28 @@
     "synchronization/cancellation_flag_unittest.cc",
     "synchronization/condition_variable_unittest.cc",
     "synchronization/lock_unittest.cc",
+    "synchronization/read_write_lock_unittest.cc",
     "synchronization/waitable_event_unittest.cc",
     "synchronization/waitable_event_watcher_unittest.cc",
+    "sys_byteorder_unittest.cc",
     "sys_info_unittest.cc",
     "system_monitor/system_monitor_unittest.cc",
     "task/cancelable_task_tracker_unittest.cc",
     "task_runner_util_unittest.cc",
+    "task_scheduler/delayed_task_manager_unittest.cc",
+    "task_scheduler/priority_queue_unittest.cc",
+    "task_scheduler/scheduler_lock_unittest.cc",
+    "task_scheduler/scheduler_service_thread_unittest.cc",
+    "task_scheduler/scheduler_worker_pool_impl_unittest.cc",
+    "task_scheduler/scheduler_worker_stack_unittest.cc",
+    "task_scheduler/scheduler_worker_unittest.cc",
+    "task_scheduler/sequence_sort_key_unittest.cc",
+    "task_scheduler/sequence_unittest.cc",
+    "task_scheduler/task_scheduler_impl_unittest.cc",
+    "task_scheduler/task_tracker_unittest.cc",
+    "task_scheduler/test_task_factory.cc",
+    "task_scheduler/test_task_factory.h",
+    "task_scheduler/test_utils.h",
     "template_util_unittest.cc",
     "test/histogram_tester_unittest.cc",
     "test/icu_test_util.cc",
@@ -1815,6 +1935,7 @@
     "timer/mock_timer_unittest.cc",
     "timer/timer_unittest.cc",
     "tools_sanity_unittest.cc",
+    "trace_event/blame_context_unittest.cc",
     "trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
     "trace_event/heap_profiler_allocation_register_unittest.cc",
     "trace_event/heap_profiler_heap_dump_writer_unittest.cc",
@@ -1824,8 +1945,6 @@
     "trace_event/memory_allocator_dump_unittest.cc",
     "trace_event/memory_dump_manager_unittest.cc",
     "trace_event/process_memory_dump_unittest.cc",
-    "trace_event/process_memory_totals_dump_provider_unittest.cc",
-    "trace_event/trace_config_memory_test_util.h",
     "trace_event/trace_config_unittest.cc",
     "trace_event/trace_event_argument_unittest.cc",
     "trace_event/trace_event_synthetic_delay_unittest.cc",
@@ -1855,17 +1974,18 @@
     "win/scoped_variant_unittest.cc",
     "win/shortcut_unittest.cc",
     "win/startup_information_unittest.cc",
+    "win/wait_chain_unittest.cc",
     "win/win_util_unittest.cc",
+    "win/windows_version_unittest.cc",
     "win/wrapped_window_proc_unittest.cc",
   ]
 
+  defines = []
+
   deps = [
     ":base",
     ":i18n",
     ":message_loop_tests",
-    ":prefs",
-    ":prefs_test_support",
-    "//base/allocator",
     "//base/test:run_all_unittests",
     "//base/test:test_support",
     "//base/third_party/dynamic_annotations",
@@ -1874,14 +1994,25 @@
     "//third_party/icu",
   ]
 
+  if (is_ios || is_mac) {
+    deps += [ ":base_unittests_arc" ]
+  }
+
+  public_deps = [
+    ":base_unittests_bundle_data",
+  ]
+
+  # Some unittests depend on the ALLOCATOR_SHIM macro.
+  configs += [ "//base/allocator:allocator_shim_define" ]
+
   data = [
     "test/data/",
   ]
 
   # Allow more direct string conversions on platforms with native utf8
   # strings
-  if (is_mac || is_ios || is_chromeos) {
-    defines = [ "SYSTEM_NATIVE_UTF8" ]
+  if (is_mac || is_ios || is_chromeos || is_chromecast) {
+    defines += [ "SYSTEM_NATIVE_UTF8" ]
   }
 
   if (is_android) {
@@ -1890,10 +2021,6 @@
       ":base_java_unittest_support",
       "//base/android/jni_generator:jni_generator_tests",
     ]
-
-    # TODO(brettw) I think this should not be here, we should not be using
-    # isolate files.
-    isolate_file = "base_unittests.isolate"
   }
 
   if (is_ios) {
@@ -1923,9 +2050,17 @@
     # TODO(GYP): dep on copy_test_data_ios action.
   }
 
+  if (is_mac) {
+    libs = [
+      "CoreFoundation.framework",
+      "Foundation.framework",
+    ]
+  }
+
   if (is_linux) {
-    sources -= [ "file_version_info_unittest.cc" ]
-    sources += [ "nix/xdg_util_unittest.cc" ]
+    if (is_desktop_linux) {
+      sources += [ "nix/xdg_util_unittest.cc" ]
+    }
 
     deps += [ "//base/test:malloc_wrapper" ]
 
@@ -1939,11 +2074,7 @@
     }
   }
 
-  if (is_linux || is_android) {
-    sources += [ "trace_event/process_memory_maps_dump_provider_unittest.cc" ]
-  }
-
-  if (!is_linux || use_ozone) {
+  if (!use_glib) {
     sources -= [ "message_loop/message_pump_glib_unittest.cc" ]
   }
 
@@ -1962,9 +2093,16 @@
     set_sources_assignment_filter(sources_assignment_filter)
   }
 
-  if (is_win && target_cpu == "x64") {
-    sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
-    deps += [ ":base_profiler_test_support_library" ]
+  if (is_win) {
+    deps += [ "//base:scoped_handle_test_dll" ]
+    if (current_cpu == "x64") {
+      sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
+      deps += [ ":base_profiler_test_support_library" ]
+    }
+  }
+
+  if (use_experimental_allocator_shim) {
+    sources += [ "allocator/allocator_shim_unittest.cc" ]
   }
 
   # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
@@ -1975,9 +2113,40 @@
     if (is_win) {
       data += [ "$root_out_dir/base_unittests.exe.pdb" ]
     } else if (is_mac) {
-      data += [ "$root_out_dir/base_unittests.dSYM/" ]
+      # TODO(crbug.com/330301): make this conditional on mac_strip_release.
+      # data += [ "$root_out_dir/base_unittests.dSYM/" ]
     }
   }
+
+  if (use_cfi_cast) {
+    # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+    defines += [ "CFI_CAST_CHECK" ]
+  }
+}
+
+action("build_date") {
+  script = "//build/write_build_date_header.py"
+
+  # Force recalculation if there's been a change.
+  inputs = [
+    "//build/util/LASTCHANGE",
+  ]
+  outputs = [
+    "$target_gen_dir/generated_build_date.h",
+  ]
+
+  args =
+      [ rebase_path("$target_gen_dir/generated_build_date.h", root_build_dir) ]
+
+  if (is_official_build) {
+    args += [ "official" ]
+  } else {
+    args += [ "default" ]
+  }
+
+  if (override_build_date != "N/A") {
+    args += [ override_build_date ]
+  }
 }
 
 if (enable_nocompile_tests) {
@@ -1986,7 +2155,6 @@
       "bind_unittest.nc",
       "callback_list_unittest.nc",
       "callback_unittest.nc",
-      "memory/scoped_ptr_unittest.nc",
       "memory/weak_ptr_unittest.nc",
     ]
 
@@ -2006,6 +2174,7 @@
       "android/java/src/org/chromium/base/ApkAssets.java",
       "android/java/src/org/chromium/base/ApplicationStatus.java",
       "android/java/src/org/chromium/base/BuildInfo.java",
+      "android/java/src/org/chromium/base/Callback.java",
       "android/java/src/org/chromium/base/CommandLine.java",
       "android/java/src/org/chromium/base/ContentUriUtils.java",
       "android/java/src/org/chromium/base/ContextUtils.java",
@@ -2046,7 +2215,7 @@
   android_library("base_java") {
     srcjar_deps = [
       ":base_android_java_enums_srcjar",
-      ":base_multidex_gen",
+      ":base_build_config_gen",
       ":base_native_libraries_gen",
     ]
 
@@ -2055,11 +2224,74 @@
       "//third_party/jsr-305:jsr_305_javalib",
     ]
 
-    DEPRECATED_java_in_dir = "android/java/src"
+    java_files = [
+      "android/java/src/org/chromium/base/ActivityState.java",
+      "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java",
+      "android/java/src/org/chromium/base/ApiCompatibilityUtils.java",
+      "android/java/src/org/chromium/base/ApkAssets.java",
+      "android/java/src/org/chromium/base/ApplicationStatus.java",
+      "android/java/src/org/chromium/base/BaseChromiumApplication.java",
+      "android/java/src/org/chromium/base/BaseSwitches.java",
+      "android/java/src/org/chromium/base/BuildInfo.java",
+      "android/java/src/org/chromium/base/Callback.java",
+      "android/java/src/org/chromium/base/CollectionUtil.java",
+      "android/java/src/org/chromium/base/CommandLine.java",
+      "android/java/src/org/chromium/base/CommandLineInitUtil.java",
+      "android/java/src/org/chromium/base/ContentUriUtils.java",
+      "android/java/src/org/chromium/base/ContextUtils.java",
+      "android/java/src/org/chromium/base/CpuFeatures.java",
+      "android/java/src/org/chromium/base/EventLog.java",
+      "android/java/src/org/chromium/base/FieldTrialList.java",
+      "android/java/src/org/chromium/base/FileUtils.java",
+      "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
+      "android/java/src/org/chromium/base/JNIUtils.java",
+      "android/java/src/org/chromium/base/JavaHandlerThread.java",
+      "android/java/src/org/chromium/base/LocaleUtils.java",
+      "android/java/src/org/chromium/base/Log.java",
+      "android/java/src/org/chromium/base/MemoryPressureListener.java",
+      "android/java/src/org/chromium/base/ObserverList.java",
+      "android/java/src/org/chromium/base/PackageUtils.java",
+      "android/java/src/org/chromium/base/PathService.java",
+      "android/java/src/org/chromium/base/PathUtils.java",
+      "android/java/src/org/chromium/base/PerfTraceEvent.java",
+      "android/java/src/org/chromium/base/PowerMonitor.java",
+      "android/java/src/org/chromium/base/PowerStatusReceiver.java",
+      "android/java/src/org/chromium/base/Promise.java",
+      "android/java/src/org/chromium/base/ResourceExtractor.java",
+      "android/java/src/org/chromium/base/SecureRandomInitializer.java",
+      "android/java/src/org/chromium/base/StreamUtil.java",
+      "android/java/src/org/chromium/base/SysUtils.java",
+      "android/java/src/org/chromium/base/SystemMessageHandler.java",
+      "android/java/src/org/chromium/base/ThreadUtils.java",
+      "android/java/src/org/chromium/base/TraceEvent.java",
+      "android/java/src/org/chromium/base/VisibleForTesting.java",
+      "android/java/src/org/chromium/base/annotations/AccessedByNative.java",
+      "android/java/src/org/chromium/base/annotations/CalledByNative.java",
+      "android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java",
+      "android/java/src/org/chromium/base/annotations/JNIAdditionalImport.java",
+      "android/java/src/org/chromium/base/annotations/JNINamespace.java",
+      "android/java/src/org/chromium/base/annotations/MainDex.java",
+      "android/java/src/org/chromium/base/annotations/NativeCall.java",
+      "android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java",
+      "android/java/src/org/chromium/base/annotations/RemovableInRelease.java",
+      "android/java/src/org/chromium/base/annotations/SuppressFBWarnings.java",
+      "android/java/src/org/chromium/base/annotations/UsedByReflection.java",
+      "android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
+      "android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
+      "android/java/src/org/chromium/base/library_loader/Linker.java",
+      "android/java/src/org/chromium/base/library_loader/LoaderErrors.java",
+      "android/java/src/org/chromium/base/library_loader/ModernLinker.java",
+      "android/java/src/org/chromium/base/library_loader/NativeLibraryPreloader.java",
+      "android/java/src/org/chromium/base/library_loader/ProcessInitException.java",
+      "android/java/src/org/chromium/base/metrics/RecordHistogram.java",
+      "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
+      "android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
+    ]
 
-    # A new version of NativeLibraries.java (with the actual correct values)
-    # will be created when creating an apk.
+    # New versions of BuildConfig.java and NativeLibraries.java
+    # (with the actual correct values) will be created when creating an apk.
     jar_excluded_patterns = [
+      "*/BuildConfig.class",
       "*/NativeLibraries.class",
       "*/NativeLibraries##*.class",
     ]
@@ -2071,7 +2303,14 @@
       ":base_java",
       ":base_java_test_support",
     ]
-    DEPRECATED_java_in_dir = "android/javatests/src"
+    java_files = [
+      "android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
+      "android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
+      "android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
+      "android/javatests/src/org/chromium/base/CommandLineTest.java",
+      "android/javatests/src/org/chromium/base/ObserverListTest.java",
+      "android/javatests/src/org/chromium/base/metrics/RecordHistogramTest.java",
+    ]
   }
 
   # GYP: //base.gyp:base_java_test_support
@@ -2080,7 +2319,42 @@
       ":base_java",
       "//testing/android/reporter:reporter_java",
     ]
-    DEPRECATED_java_in_dir = "test/android/javatests/src"
+    java_files = [
+      "test/android/javatests/src/org/chromium/base/test/BaseActivityInstrumentationTestCase.java",
+      "test/android/javatests/src/org/chromium/base/test/BaseChromiumInstrumentationTestRunner.java",
+      "test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
+      "test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
+      "test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
+      "test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisableIf.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Feature.java",
+      "test/android/javatests/src/org/chromium/base/test/util/FlakyTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/InMemorySharedPreferences.java",
+      "test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Manual.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/PerfTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Restriction.java",
+      "test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java",
+      "test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java",
+      "test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TestThread.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TimeoutScale.java",
+      "test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/BaseParameter.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/Parameter.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/Parameterizable.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/ParameterizedTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/parameters/MethodParameter.java",
+    ]
   }
 
   # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
@@ -2094,6 +2368,7 @@
       "//third_party/robolectric:android-all-4.3_r2-robolectric-0",
       "//third_party/robolectric:robolectric_java",
     ]
+    srcjar_deps = [ ":base_build_config_gen" ]
   }
 
   # GYP: //base.gyp:base_junit_tests
@@ -2101,7 +2376,11 @@
     java_files = [
       "android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
       "android/junit/src/org/chromium/base/LogTest.java",
+      "android/junit/src/org/chromium/base/PromiseTest.java",
       "test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java",
     ]
     deps = [
       ":base_java",
@@ -2123,15 +2402,17 @@
     ]
   }
 
-  # GYP: //base/base.gyp:base_multidex_gen
-  java_cpp_template("base_multidex_gen") {
+  # GYP: //base/base.gyp:base_build_config_gen
+  java_cpp_template("base_build_config_gen") {
     sources = [
-      "android/java/templates/ChromiumMultiDex.template",
+      "android/java/templates/BuildConfig.template",
     ]
-    if (is_debug) {
-      defines = [ "MULTIDEX_CONFIGURATION_Debug" ]
+    package_name = "org/chromium/base"
+
+    defines = []
+    if (!is_java_debug) {
+      defines += [ "NDEBUG" ]
     }
-    package_name = "org/chromium/base/multidex"
   }
 
   # GYP: //base/base.gyp:base_native_libraries_gen
diff --git a/base/OWNERS b/base/OWNERS
index 4d4a239..b6cfce4 100644
--- a/base/OWNERS
+++ b/base/OWNERS
@@ -1,8 +1,5 @@
-mark@chromium.org
-thakis@chromium.org
-danakj@chromium.org
-thestig@chromium.org
-
+# About src/base:
+#
 # Chromium is a very mature project, most things that are generally useful are
 # already here, and that things not here aren't generally useful.
 #
@@ -15,11 +12,20 @@
 # Adding a new logging macro DPVELOG_NE is not more clear than just
 # writing the stuff you want to log in a regular logging statement, even
 # if it makes your calling code longer. Just add it to your own code.
+#
+# If the code in question does not need to be used inside base, but will have
+# multiple consumers across the codebase, consider placing it in a new directory
+# under components/ instead.
 
-per-file *.isolate=maruel@chromium.org
-per-file *.isolate=tandrii@chromium.org
-per-file *.isolate=vadimsh@chromium.org
-per-file security_unittest.cc=jln@chromium.org
+mark@chromium.org
+thakis@chromium.org
+danakj@chromium.org
+thestig@chromium.org
+dcheng@chromium.org
+
+# For Bind/Callback:
+per-file bind*=tzik@chromium.org
+per-file callback*=tzik@chromium.org
 
 # For Android-specific changes:
 per-file *android*=nyquist@chromium.org
@@ -30,3 +36,11 @@
 # For FeatureList API:
 per-file feature_list*=asvitkine@chromium.org
 per-file feature_list*=isherman@chromium.org
+
+# For bot infrastructure:
+per-file *.isolate=maruel@chromium.org
+per-file *.isolate=tandrii@chromium.org
+per-file *.isolate=vadimsh@chromium.org
+
+# For TCMalloc tests:
+per-file security_unittest.cc=jln@chromium.org
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index 9d09a35..490b8e8 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -2,13 +2,10 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import("//build/buildflag_header.gni")
 import("//build/config/allocator.gni")
 import("//build/config/compiler/compiler.gni")
 
-if (is_win) {
-  import("//build/config/win/visual_studio_version.gni")
-}
-
 declare_args() {
   # Provide a way to force disable debugallocation in Debug builds,
   # e.g. for profiling (it's more rare to profile Debug builds,
@@ -16,18 +13,20 @@
   enable_debugallocation = is_debug
 }
 
-# Only executables and not libraries should depend on the allocator target;
-# only the application (the final executable) knows what allocator makes sense.
+# Allocator shim is only enabled for Release static builds.
+win_use_allocator_shim = is_win && !is_component_build && !is_debug
+
 # This "allocator" meta-target will forward to the default allocator according
 # to the build settings.
 group("allocator") {
   public_deps = []
+  deps = []
+
   if (use_allocator == "tcmalloc") {
-    public_deps += [ ":tcmalloc" ]
+    deps += [ ":tcmalloc" ]
   }
 
-  # This condition expresses the win_use_allocator_shim in the GYP build.
-  if (is_win && !is_component_build && visual_studio_version != "2015") {
+  if (win_use_allocator_shim) {
     public_deps += [ ":allocator_shim" ]
   }
 }
@@ -39,19 +38,23 @@
 # assumes that the library using it will eventually be linked with
 # //base/allocator in the default way. Clean this up and delete this.
 config("allocator_shim_define") {
-  if (is_win && !is_component_build && visual_studio_version != "2015") {
+  if (win_use_allocator_shim) {
     defines = [ "ALLOCATOR_SHIM" ]
   }
 }
 
 config("tcmalloc_flags") {
+  defines = []
   if (enable_debugallocation) {
-    defines = [
+    defines += [
       # Use debugallocation for Debug builds to catch problems early
       # and cleanly, http://crbug.com/30715 .
       "TCMALLOC_FOR_DEBUGALLOCATION",
     ]
   }
+  if (use_experimental_allocator_shim) {
+    defines += [ "TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC" ]
+  }
   if (is_clang) {
     cflags = [
       # tcmalloc initializes some fields in the wrong order.
@@ -68,51 +71,30 @@
       # typedefs.
       "-Wno-unused-private-field",
     ]
+  } else {
+    cflags = []
+  }
+
+  if (is_linux || is_android) {
+    # We enable all warnings by default, but upstream disables a few.
+    # Keep "-Wno-*" flags in sync with upstream by comparing against:
+    # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
+    cflags += [
+      "-Wno-sign-compare",
+      "-Wno-unused-result",
+    ]
   }
 }
 
-# This config and libc modification are only used on Windows.
-if (is_win) {
-  config("nocmt") {
-    ldflags = [
-      "/NODEFAULTLIB:libcmt",
-      "/NODEFAULTLIB:libcmtd",
+# This config is only used on Windows static release builds for the
+# allocator shim.
+if (win_use_allocator_shim) {
+  source_set("allocator_shim") {
+    sources = [
+      "allocator_shim_win.cc",
+      "allocator_shim_win.h",
     ]
-    libs = [ rebase_path("$target_gen_dir/allocator/libcmt.lib") ]
-  }
-
-  if (!is_component_build && visual_studio_version != "2015") {
-    action("prep_libc") {
-      script = "prep_libc.py"
-      outputs = [
-        "$target_gen_dir/allocator/libcmt.lib",
-      ]
-      args = [
-        visual_studio_path + "/vc/lib",
-        rebase_path("$target_gen_dir/allocator"),
-        current_cpu,
-
-        # The environment file in the build directory. This is required because
-        # the Windows toolchain setup saves the VC paths and such so that
-        # running "mc.exe" will work with the configured toolchain. This file
-        # is in the root build dir.
-        "environment.$current_cpu",
-      ]
-    }
-
-    source_set("allocator_shim") {
-      sources = [
-        "allocator_shim_win.cc",
-      ]
-      configs -= [ "//build/config/compiler:chromium_code" ]
-      configs += [ "//build/config/compiler:no_chromium_code" ]
-
-      public_configs = [ ":nocmt" ]
-      deps = [
-        ":prep_libc",
-        "//base",
-      ]
-    }
+    configs += [ ":allocator_shim_define" ]
   }
 }
 
@@ -258,15 +240,16 @@
         "$tcmalloc_dir/src/windows/port.h",
       ]
 
-      # We enable all warnings by default, but upstream disables a few.
-      # Keep "-Wno-*" flags in sync with upstream by comparing against:
-      # http://code.google.com/p/google-perftools/source/browse/trunk/Makefile.am
-      cflags = [
-        "-Wno-sign-compare",
-        "-Wno-unused-result",
-      ]
-
-      configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+      # Compiling tcmalloc with -fvisibility=default is only necessary when
+      # not using the allocator shim, which provides the correct visibility
+      # annotations for those symbols which need to be exported (see
+      # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+      # //base/allocator/allocator_shim_internals.h for the definition of
+      # SHIM_ALWAYS_EXPORT).
+      if (!use_experimental_allocator_shim) {
+        configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+        configs += [ "//build/config/gcc:symbol_visibility_default" ]
+      }
 
       ldflags = [
         # Don't let linker rip this symbol out, otherwise the heap&cpu
@@ -290,3 +273,55 @@
     deps += [ "//base/third_party/dynamic_annotations" ]
   }
 }  # use_allocator == "tcmalloc"
+
+buildflag_header("features") {
+  header = "features.h"
+  flags = [ "USE_EXPERIMENTAL_ALLOCATOR_SHIM=$use_experimental_allocator_shim" ]
+}
+
+if (use_experimental_allocator_shim) {
+  # Used to shim malloc symbols on Android. see //base/allocator/README.md.
+  config("wrap_malloc_symbols") {
+    ldflags = [
+      "-Wl,-wrap,calloc",
+      "-Wl,-wrap,free",
+      "-Wl,-wrap,malloc",
+      "-Wl,-wrap,memalign",
+      "-Wl,-wrap,posix_memalign",
+      "-Wl,-wrap,pvalloc",
+      "-Wl,-wrap,realloc",
+      "-Wl,-wrap,valloc",
+    ]
+  }
+
+  source_set("unified_allocator_shim") {
+    # TODO(primiano): support other platforms, currently this works only on
+    # Linux/CrOS/Android. http://crbug.com/550886 .
+    configs += [ "//base:base_implementation" ]  # for BASE_EXPORT
+    visibility = [ "//base:base" ]
+    sources = [
+      "allocator_shim.cc",
+      "allocator_shim.h",
+      "allocator_shim_internals.h",
+      "allocator_shim_override_cpp_symbols.h",
+      "allocator_shim_override_libc_symbols.h",
+    ]
+    if (is_linux && use_allocator == "tcmalloc") {
+      sources += [
+        "allocator_shim_default_dispatch_to_tcmalloc.cc",
+        "allocator_shim_override_glibc_weak_symbols.h",
+      ]
+      deps = [
+        ":tcmalloc",
+      ]
+    } else if (is_linux && use_allocator == "none") {
+      sources += [ "allocator_shim_default_dispatch_to_glibc.cc" ]
+    } else if (is_android && use_allocator == "none") {
+      sources += [
+        "allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
+        "allocator_shim_override_linker_wrapped_symbols.h",
+      ]
+      all_dependent_configs = [ ":wrap_malloc_symbols" ]
+    }
+  }
+}
diff --git a/base/allocator/OWNERS b/base/allocator/OWNERS
index 5d9997b..f26394a 100644
--- a/base/allocator/OWNERS
+++ b/base/allocator/OWNERS
@@ -1,3 +1,4 @@
+primiano@chromium.org
 wfh@chromium.org
 
 # For changes to tcmalloc it is advisable to ask jar@chromium.org
diff --git a/base/allocator/README b/base/allocator/README
deleted file mode 100644
index 8a5595f..0000000
--- a/base/allocator/README
+++ /dev/null
@@ -1,56 +0,0 @@
-Notes about the Chrome memory allocator.
-
-Background
-----------
-We use this library as a generic way to fork into any of several allocators.
-Currently we can, at runtime, switch between:
-   the default windows allocator
-   the windows low-fragmentation-heap
-   tcmalloc
-
-The mechanism for hooking LIBCMT in windows is rather tricky.  The core
-problem is that by default, the windows library does not declare malloc and
-free as weak symbols.  Because of this, they cannot be overriden.  To work
-around this, we start with the LIBCMT.LIB, and manually remove all allocator
-related functions from it using the visual studio library tool.  Once removed,
-we can now link against the library and provide custom versions of the 
-allocator related functionality.
-
-
-Source code
------------
-This directory contains just the allocator (i.e. shim) layer that switches
-between the different underlying memory allocation implementations.
-
-The tcmalloc library originates outside of Chromium and exists in
-../../third_party/tcmalloc (currently, the actual location is defined in the
-allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
-track Chromium-specific changes independently from upstream changes.
-
-The general intent is to push local changes upstream so that over
-time we no longer need any forked files.
-
-
-Adding a new allocator
-----------------------
-Adding a new allocator requires definition of the following five functions:
-
-  extern "C" {
-    bool init();
-    void* malloc(size_t s);
-    void* realloc(void* p, size_t s);
-    void free(void* s);
-    size_t msize(void* p);
-  }
-
-All other allocation related functions (new/delete/calloc/etc) have been
-implemented generically to work across all allocators.
-
-
-Usage
------
-You can use the different allocators by setting the environment variable
-CHROME_ALLOCATOR to:
-   "tcmalloc"  - TC Malloc (default)
-   "winheap"   - Windows default heap
-   "winlfh"    - Windows Low-Fragmentation heap
diff --git a/base/allocator/README.md b/base/allocator/README.md
new file mode 100644
index 0000000..164df51
--- /dev/null
+++ b/base/allocator/README.md
@@ -0,0 +1,196 @@
+This document describes how malloc / new calls are routed in the various Chrome
+platforms.
+
+Bear in mind that the Chromium codebase does not always just use `malloc()`.
+Some examples:
+ - Large parts of the renderer (Blink) use two home-brewed allocators,
+   PartitionAlloc and BlinkGC (Oilpan).
+ - Some subsystems, such as the V8 JavaScript engine, handle memory management
+   autonomously.
+ - Various parts of the codebase use abstractions such as `SharedMemory` or
+   `DiscardableMemory` which, similarly to the above, have their own page-level
+   memory management.
+
+Background
+----------
+The `allocator` target defines at compile-time the platform-specific choice of
+the allocator and extra-hooks which services calls to malloc/new. The relevant
+build-time flags involved are `use_allocator` and `win_use_allocator_shim`.
+
+The default choices are as follows:
+
+**Windows**  
+`use_allocator: winheap`, the default Windows heap.
+Additionally, `static_library` (i.e. non-component) builds have a shim
+layer wrapping malloc/new, which is controlled by `win_use_allocator_shim`.  
+The shim layer provides extra security features, such as preventing large
+allocations that can hit signed vs. unsigned bugs in third_party code.
+
+**Linux Desktop / CrOS**  
+`use_allocator: tcmalloc`, a forked copy of tcmalloc which resides in
+`third_party/tcmalloc/chromium`. Setting `use_allocator: none` causes the build
+to fall back to the system (Glibc) symbols.
+
+**Android**  
+`use_allocator: none`, always use the allocator symbols coming from Android's
+libc (Bionic). As it is developed as part of the OS, it is considered to be
+optimized for small devices and more memory-efficient than other choices.  
+The actual implementation backing malloc symbols in Bionic is up to the board
+config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
+
+**Mac/iOS**  
+`use_allocator: none`, we always use the system's allocator implementation.
+
+In addition, when building for `asan` / `msan` / `syzyasan` / `valgrind`,
+both the allocator and the shim layer are disabled.
+
+Layering and build deps
+-----------------------
+The `allocator` target provides both the source files for tcmalloc (where
+applicable) and the linker flags required for the Windows shim layer.
+The `base` target is (almost) the only one depending on `allocator`. No other
+targets should depend on it, with the exception of the very few executables /
+dynamic libraries that don't depend, either directly or indirectly, on `base`
+within the scope of a linker unit.
+
+More importantly, **no other place outside of `/base` should depend on the
+specific allocator** (e.g., directly include `third_party/tcmalloc`).
+If such a functional dependency is required that should be achieved using
+abstractions in `base` (see `/base/allocator/allocator_extension.h` and
+`/base/memory/`)
+
+**Why `base` depends on `allocator`?**  
+Because it needs to provide services that depend on the actual allocator
+implementation. In the past `base` used to pretend to be allocator-agnostic
+and get the dependencies injected by other layers. This ended up being an
+inconsistent mess.
+See the [allocator cleanup doc][url-allocator-cleanup] for more context.
+
+Linker unit targets (executables and shared libraries) that depend in some way
+on `base` (most of the targets in the codebase) get automatically the correct
+set of linker flags to pull in tcmalloc or the Windows shim-layer.
+
+
+Source code
+-----------
+This directory contains just the allocator (i.e. shim) layer that switches
+between the different underlying memory allocation implementations.
+
+The tcmalloc library originates outside of Chromium and exists in
+`../../third_party/tcmalloc` (currently, the actual location is defined in the
+allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
+track Chromium-specific changes independently from upstream changes.
+
+The general intent is to push local changes upstream so that over
+time we no longer need any forked files.
+
+
+Unified allocator shim
+----------------------
+On most platform, Chrome overrides the malloc / operator new symbols (and
+corresponding free / delete and other variants). This is to enforce security
+checks and lately to enable the
+[memory-infra heap profiler][url-memory-infra-heap-profiler].  
+Historically each platform had its special logic for defining the allocator
+symbols in different places of the codebase. The unified allocator shim is
+a project aimed to unify the symbol definition and allocator routing logic in
+a central place.
+
+ - Full documentation: [Allocator shim design doc][url-allocator-shim].
+ - Current state: Available and enabled by default on Linux, CrOS and Android.
+ - Tracking bug: [https://crbug.com/550886][crbug.com/550886].
+ - Build-time flag: `use_experimental_allocator_shim`.
+
+**Overview of the unified allocator shim**  
+The allocator shim consists of three stages:
+```
++-------------------------+    +-----------------------+    +----------------+
+|     malloc & friends    | -> |       shim layer      | -> |   Routing to   |
+|    symbols definition   |    |     implementation    |    |    allocator   |
++-------------------------+    +-----------------------+    +----------------+
+| - libc symbols (malloc, |    | - Security checks     |    | - tcmalloc     |
+|   calloc, free, ...)    |    | - Chain of dispatchers|    | - glibc        |
+| - C++ symbols (operator |    |   that can intercept  |    | - Android      |
+|   new, delete, ...)     |    |   and override        |    |   bionic       |
+| - glibc weak symbols    |    |   allocations         |    | - WinHeap      |
+|   (__libc_malloc, ...)  |    +-----------------------+    +----------------+
++-------------------------+
+```
+
+**1. malloc symbols definition**  
+This stage takes care of overriding the symbols `malloc`, `free`,
+`operator new`, `operator delete` and friends and routing those calls inside the
+allocator shim (next point).
+This is taken care of by the headers in `allocator_shim_override_*`.
+
+*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
+in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
+and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
+`operator delete` and friends).
+This enables proper interposition of malloc symbols referenced by the main
+executable and any third party libraries. Symbol resolution on Linux is a breadth first search that starts from the root link unit, that is the executable
+(see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats Specification).
+Additionally, when tcmalloc is the default allocator, some extra glibc symbols
+are also defined in `allocator_shim_override_glibc_weak_symbols.h`, for subtle
+reasons explained in that file.
+The Linux/CrOS shim was introduced by
+[crrev.com/1675143004](https://crrev.com/1675143004).
+
+*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
+possible. This is because Android processes are `fork()`-ed from the Android
+zygote, which pre-loads libc.so and only later native code gets loaded via
+`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
+scope).
+In this case, the approach is instead to wrap the symbol resolution at link time
+(i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
+The use of this wrapping flag causes:
+ - All references to allocator symbols in the Chrome codebase to be rewritten as
+   references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
+   defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
+   route allocator calls inside the shim layer.
+ - The reference to the original `malloc` symbols (which typically is defined by
+   the system's libc.so) are accessible via the special `__real_malloc` and
+   friends symbols (which will be relocated, at load time, against `malloc`).
+
+In summary, this approach is transparent to the dynamic loader, which still sees
+undefined symbol references to malloc symbols.
+These symbols will be resolved against libc.so as usual.
+More details in [crrev.com/1719433002](https://crrev.com/1719433002).
+
+**2. Shim layer implementation**  
+This stage contains the actual shim implementation. This consists of:
+- A singly linked list of dispatchers (structs with function pointers to `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
+(using the `InsertAllocatorDispatch` API). They can intercept and override
+allocator calls.
+- The security checks (suicide on malloc-failure via `std::new_handler`, etc).
+This happens inside `allocator_shim.cc`
+
+**3. Final allocator routing**  
+The final element of the aforementioned dispatcher chain is statically defined
+at build time and ultimately routes the allocator calls to the actual allocator
+(as described in the *Background* section above). This is taken care of by the
+headers in `allocator_shim_default_dispatch_to_*` files.
+
+
+Appendixes
+----------
+**How does the Windows shim layer replace the malloc symbols?**  
+The mechanism for hooking LIBCMT in Windows is rather tricky.  The core
+problem is that by default, the Windows library does not declare malloc and
+free as weak symbols.  Because of this, they cannot be overridden.  To work
+around this, we start with the LIBCMT.LIB, and manually remove all allocator
+related functions from it using the visual studio library tool.  Once removed,
+we can now link against the library and provide custom versions of the
+allocator related functionality.
+See the script `prep_libc.py` in this folder.
+
+Related links
+-------------
+- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
+- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
+- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
+- [Memory-Infra: Tools to profile memory usage in Chrome](components/tracing/docs/memory_infra.md)
+
+[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
+[url-memory-infra-heap-profiler]: components/tracing/docs/heap_profiler.md
+[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
diff --git a/base/allocator/allocator.gyp b/base/allocator/allocator.gyp
index 45a95bb..674d4d6 100644
--- a/base/allocator/allocator.gyp
+++ b/base/allocator/allocator.gyp
@@ -22,15 +22,22 @@
     'disable_debugallocation%': 0,
   },
   'targets': [
-    # Only executables and not libraries should depend on the
-    # allocator target; only the application (the final executable)
-    # knows what allocator makes sense.
+    # The only targets that should depend on allocator are 'base' and
+    # executables that don't depend, directly or indirectly, on base (a few).
+    # All the other targets get a transitive dependency on this target via base.
     {
       'target_name': 'allocator',
-      # TODO(primiano): This should be type: none for the noop cases (an empty
-      # static lib can confuse some gyp generators). Fix it once the refactoring
-      # (crbug.com/564618) bring this file to a saner state (fewer conditions).
-      'type': 'static_library',
+      'variables': {
+        'conditions': [
+          ['use_allocator!="none" or (OS=="win" and win_use_allocator_shim==1)', {
+            'allocator_target_type%': 'static_library',
+          }, {
+            'allocator_target_type%': 'none',
+          }],
+        ],
+      },
+      'type': '<(allocator_target_type)',
+      'toolsets': ['host', 'target'],
       'conditions': [
         ['OS=="win" and win_use_allocator_shim==1', {
           'msvs_settings': {
@@ -42,14 +49,12 @@
               'AdditionalOptions': ['/ignore:4006'],
             },
           },
-          'dependencies': [
-            'libcmt',
-          ],
           'include_dirs': [
             '../..',
           ],
           'sources': [
             'allocator_shim_win.cc',
+            'allocator_shim_win.h',
           ],
           'configurations': {
             'Debug_Base': {
@@ -60,20 +65,6 @@
               },
             },
           },
-          'direct_dependent_settings': {
-            'configurations': {
-              'Common_Base': {
-                'msvs_settings': {
-                  'VCLinkerTool': {
-                    'IgnoreDefaultLibraryNames': ['libcmtd.lib', 'libcmt.lib'],
-                    'AdditionalDependencies': [
-                      '<(SHARED_INTERMEDIATE_DIR)/allocator/libcmt.lib'
-                    ],
-                  },
-                },
-              },
-            },
-          },
         }],  # OS=="win"
         ['use_allocator=="tcmalloc"', {
           # Disable the heap checker in tcmalloc.
@@ -310,9 +301,6 @@
                 '-Wno-sign-compare',
                 '-Wno-unused-result',
               ],
-              'cflags!': [
-                '-fvisibility=hidden',
-              ],
               'link_settings': {
                 'ldflags': [
                   # Don't let linker rip this symbol out, otherwise the heap&cpu
@@ -324,6 +312,19 @@
                   '-Wl,-u_ZN15HeapLeakChecker12IgnoreObjectEPKv,-u_ZN15HeapLeakChecker14UnIgnoreObjectEPKv',
                 ],
               },
+              # Compiling tcmalloc with -fvisibility=default is only necessary when
+              # not using the allocator shim, which provides the correct visibility
+              # annotations for those symbols which need to be exported (see
+              # //base/allocator/allocator_shim_override_glibc_weak_symbols.h and
+              # //base/allocator/allocator_shim_internals.h for the definition of
+              # SHIM_ALWAYS_EXPORT).
+              'conditions': [
+                ['use_experimental_allocator_shim==0', {
+                  'cflags!': [
+                    '-fvisibility=hidden',
+                  ],
+                }],
+              ],
             }],
             ['profiling!=1', {
               'sources!': [
@@ -337,6 +338,11 @@
                 '<(tcmalloc_dir)/src/profiler.cc',
               ],
             }],
+            ['use_experimental_allocator_shim==1', {
+              'defines': [
+                'TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC',
+              ],
+            }]
           ],
           'configurations': {
             'Debug_Base': {
@@ -370,33 +376,75 @@
         }],
       ],  # conditions of 'allocator' target.
     },  # 'allocator' target.
+    {
+      # GN: //base/allocator:features
+      # When referenced from a target that might be compiled in the host
+      # toolchain, always refer to 'allocator_features#target'.
+      'target_name': 'allocator_features',
+      'includes': [ '../../build/buildflag_header.gypi' ],
+      'variables': {
+        'buildflag_header_path': 'base/allocator/features.h',
+        'buildflag_flags': [
+          'USE_EXPERIMENTAL_ALLOCATOR_SHIM=<(use_experimental_allocator_shim)',
+        ],
+      },
+    },  # 'allocator_features' target.
   ],  # targets.
   'conditions': [
-    ['OS=="win" and component!="shared_library"', {
+    ['use_experimental_allocator_shim==1', {
       'targets': [
         {
-          'target_name': 'libcmt',
-          'type': 'none',
-          'actions': [
-            {
-              'action_name': 'libcmt',
-              'inputs': [
-                'prep_libc.py',
-              ],
-              'outputs': [
-                '<(SHARED_INTERMEDIATE_DIR)/allocator/libcmt.lib',
-              ],
-              'action': [
-                'python',
-                'prep_libc.py',
-                '$(VCInstallDir)lib',
-                '<(SHARED_INTERMEDIATE_DIR)/allocator',
-                '<(target_arch)',
-              ],
-            },
+          # GN: //base/allocator:unified_allocator_shim
+          'target_name': 'unified_allocator_shim',
+          'toolsets': ['host', 'target'],
+          'type': 'static_library',
+          'defines': [ 'BASE_IMPLEMENTATION' ],
+          'sources': [
+            'allocator_shim.cc',
+            'allocator_shim.h',
+            'allocator_shim_internals.h',
+            'allocator_shim_override_cpp_symbols.h',
+            'allocator_shim_override_libc_symbols.h',
           ],
-        },
+          'include_dirs': [
+            '../..',
+          ],
+          'conditions': [
+            ['OS=="linux" and use_allocator=="tcmalloc"', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_tcmalloc.cc',
+                'allocator_shim_override_glibc_weak_symbols.h',
+              ],
+            }],
+            ['use_allocator=="none" and (OS=="linux" or (OS=="android" and _toolset == "host" and host_os == "linux"))', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_glibc.cc',
+              ],
+            }],
+            ['OS=="android" and _toolset == "target"', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc',
+                'allocator_shim_override_linker_wrapped_symbols.h',
+              ],
+              # On Android all references to malloc & friends symbols are
+              # rewritten, at link time, and routed to the shim.
+              # See //base/allocator/README.md.
+              'all_dependent_settings': {
+                'ldflags': [
+                  '-Wl,-wrap,calloc',
+                  '-Wl,-wrap,free',
+                  '-Wl,-wrap,malloc',
+                  '-Wl,-wrap,memalign',
+                  '-Wl,-wrap,posix_memalign',
+                  '-Wl,-wrap,pvalloc',
+                  '-Wl,-wrap,realloc',
+                  '-Wl,-wrap,valloc',
+                ],
+              },
+            }],
+          ]
+        },  # 'unified_allocator_shim' target.
       ],
-    }],
+    }]
   ],
 }
diff --git a/base/allocator/allocator_extension.cc b/base/allocator/allocator_extension.cc
index 4f0b3a9..9a3d114 100644
--- a/base/allocator/allocator_extension.cc
+++ b/base/allocator/allocator_extension.cc
@@ -6,34 +6,54 @@
 
 #include "base/logging.h"
 
+#if defined(USE_TCMALLOC)
+#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/malloc_extension.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/malloc_hook.h"
+#endif
+
 namespace base {
 namespace allocator {
 
-namespace {
-ReleaseFreeMemoryFunction g_release_free_memory_function = nullptr;
-GetNumericPropertyFunction g_get_numeric_property_function = nullptr;
-}
-
 void ReleaseFreeMemory() {
-  if (g_release_free_memory_function)
-    g_release_free_memory_function();
+#if defined(USE_TCMALLOC)
+  ::MallocExtension::instance()->ReleaseFreeMemory();
+#endif
 }
 
 bool GetNumericProperty(const char* name, size_t* value) {
-  return g_get_numeric_property_function &&
-         g_get_numeric_property_function(name, value);
+#if defined(USE_TCMALLOC)
+  return ::MallocExtension::instance()->GetNumericProperty(name, value);
+#endif
+  return false;
 }
 
-void SetReleaseFreeMemoryFunction(
-    ReleaseFreeMemoryFunction release_free_memory_function) {
-  DCHECK(!g_release_free_memory_function);
-  g_release_free_memory_function = release_free_memory_function;
+bool IsHeapProfilerRunning() {
+#if defined(USE_TCMALLOC)
+  return ::IsHeapProfilerRunning();
+#endif
+  return false;
 }
 
-void SetGetNumericPropertyFunction(
-    GetNumericPropertyFunction get_numeric_property_function) {
-  DCHECK(!g_get_numeric_property_function);
-  g_get_numeric_property_function = get_numeric_property_function;
+void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook) {
+// TODO(sque): Use allocator shim layer instead.
+#if defined(USE_TCMALLOC)
+  // Make sure no hooks get overwritten.
+  auto prev_alloc_hook = MallocHook::SetNewHook(alloc_hook);
+  if (alloc_hook)
+    DCHECK(!prev_alloc_hook);
+
+  auto prev_free_hook = MallocHook::SetDeleteHook(free_hook);
+  if (free_hook)
+    DCHECK(!prev_free_hook);
+#endif
+}
+
+int GetCallStack(void** stack, int max_stack_size) {
+#if defined(USE_TCMALLOC)
+  return MallocHook::GetCallerStackTrace(stack, max_stack_size, 0);
+#endif
+  return 0;
 }
 
 }  // namespace allocator
diff --git a/base/allocator/allocator_extension.h b/base/allocator/allocator_extension.h
index 3be2cea..9f2775a 100644
--- a/base/allocator/allocator_extension.h
+++ b/base/allocator/allocator_extension.h
@@ -13,8 +13,9 @@
 namespace base {
 namespace allocator {
 
-typedef void (*ReleaseFreeMemoryFunction)();
-typedef bool (*GetNumericPropertyFunction)(const char* name, size_t* value);
+// Callback types for alloc and free.
+using AllocHookFunc = void (*)(const void*, size_t);
+using FreeHookFunc = void (*)(const void*);
 
 // Request that the allocator release any free memory it knows about to the
 // system.
@@ -26,20 +27,23 @@
 // |name| or |value| cannot be NULL
 BASE_EXPORT bool GetNumericProperty(const char* name, size_t* value);
 
-// These settings allow specifying a callback used to implement the allocator
-// extension functions.  These are optional, but if set they must only be set
-// once.  These will typically called in an allocator-specific initialization
-// routine.
+BASE_EXPORT bool IsHeapProfilerRunning();
+
+// Register callbacks for alloc and free. Can only store one callback at a time
+// for each of alloc and free.
+BASE_EXPORT void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook);
+
+// Attempts to unwind the call stack from the current location where this
+// function is being called from. Must be called from a hook function registered
+// by calling SetHooks, directly or indirectly.
 //
-// No threading promises are made.  The caller is responsible for making sure
-// these pointers are set before any other threads attempt to call the above
-// functions.
-
-BASE_EXPORT void SetReleaseFreeMemoryFunction(
-    ReleaseFreeMemoryFunction release_free_memory_function);
-
-BASE_EXPORT void SetGetNumericPropertyFunction(
-    GetNumericPropertyFunction get_numeric_property_function);
+// Arguments:
+//   stack:          pointer to a pre-allocated array of void*'s.
+//   max_stack_size: indicates the size of the array in |stack|.
+//
+// Returns the number of call stack frames stored in |stack|, or 0 if no call
+// stack information is available.
+BASE_EXPORT int GetCallStack(void** stack, int max_stack_size);
 
 }  // namespace allocator
 }  // namespace base
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
new file mode 100644
index 0000000..09ed45f
--- /dev/null
+++ b/base/allocator/allocator_shim.cc
@@ -0,0 +1,260 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#include <new>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+// No calls to malloc / new in this file. They would cause re-entrancy of
+// the shim, which is hard to deal with. Keep this code as simple as possible
+// and don't use any external C++ object here, not even //base ones. Even if
+// they are safe to use today, in future they might be refactored.
+
+namespace {
+
+using namespace base;
+
+subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
+    &allocator::AllocatorDispatch::default_dispatch);
+
+bool g_call_new_handler_on_malloc_failure = false;
+subtle::Atomic32 g_new_handler_lock = 0;
+
+// In theory this should be just base::ThreadChecker. But we can't afford
+// the luxury of a LazyInstance<ThreadChecker> here as it would cause a new().
+bool CalledOnValidThread() {
+  using subtle::Atomic32;
+  const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
+  static Atomic32 g_tid = kInvalidTID;
+  Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
+  Atomic32 prev_tid =
+      subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
+  return prev_tid == kInvalidTID || prev_tid == cur_tid;
+}
+
+inline size_t GetPageSize() {
+  static size_t pagesize = 0;
+  if (!pagesize)
+    pagesize = sysconf(_SC_PAGESIZE);
+  return pagesize;
+}
+
+// Calls the std::new handler thread-safely. Returns true if a new_handler was
+// set and called, false if no new_handler was set.
+bool CallNewHandler() {
+  // TODO(primiano): C++11 has introduced ::get_new_handler() which is supposed
+  // to be thread safe and would avoid the spinlock boilerplate here. However
+  // it doesn't seem to be available in the Linux chroot headers yet.
+  std::new_handler nh;
+  {
+    while (subtle::Acquire_CompareAndSwap(&g_new_handler_lock, 0, 1))
+      PlatformThread::YieldCurrentThread();
+    nh = std::set_new_handler(0);
+    ignore_result(std::set_new_handler(nh));
+    subtle::Release_Store(&g_new_handler_lock, 0);
+  }
+  if (!nh)
+    return false;
+  (*nh)();
+  // Assume the new_handler will abort if it fails. Exceptions are disabled and
+  // we don't support the case of a new_handler throwing std::bad_alloc.
+  return true;
+}
+
+inline const allocator::AllocatorDispatch* GetChainHead() {
+  // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
+  // Unfortunately due to that bug NoBarrier_Load() is mistakenly fully
+  // barriered on Linux+Clang, and that causes visible perf regressions.
+  return reinterpret_cast<const allocator::AllocatorDispatch*>(
+#if defined(OS_LINUX) && defined(__clang__)
+      *static_cast<const volatile subtle::AtomicWord*>(&g_chain_head)
+#else
+      subtle::NoBarrier_Load(&g_chain_head)
+#endif
+  );
+}
+
+}  // namespace
+
+namespace base {
+namespace allocator {
+
+void SetCallNewHandlerOnMallocFailure(bool value) {
+  g_call_new_handler_on_malloc_failure = value;
+}
+
+void* UncheckedAlloc(size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->alloc_function(chain_head, size);
+}
+
+void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
+  // Ensure this is always called on the same thread.
+  DCHECK(CalledOnValidThread());
+
+  dispatch->next = GetChainHead();
+
+  // This function does not guarantee to be thread-safe w.r.t. concurrent
+  // insertions, but still has to guarantee that all the threads always
+  // see a consistent chain, hence the MemoryBarrier() below.
+  // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
+  // we don't really want this to be a release-store with a corresponding
+  // acquire-load during malloc().
+  subtle::MemoryBarrier();
+
+  subtle::NoBarrier_Store(&g_chain_head,
+                          reinterpret_cast<subtle::AtomicWord>(dispatch));
+}
+
+void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
+  DCHECK(CalledOnValidThread());
+  DCHECK_EQ(GetChainHead(), dispatch);
+  subtle::NoBarrier_Store(&g_chain_head,
+                          reinterpret_cast<subtle::AtomicWord>(dispatch->next));
+}
+
+}  // namespace allocator
+}  // namespace base
+
+// The Shim* functions below are the entry-points into the shim-layer and
+// are supposed to be invoked / aliased by the allocator_shim_override_*
+// headers to route the malloc / new symbols through the shim layer.
+extern "C" {
+
+// The general pattern for allocations is:
+// - Try to allocate, if succeeded return the pointer.
+// - If the allocation failed:
+//   - Call the std::new_handler if it was a C++ allocation.
+//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
+//     AND SetCallNewHandlerOnMallocFailure(true).
+//   - If the std::new_handler is NOT set just return nullptr.
+//   - If the std::new_handler is set:
+//     - Assume it will abort() if it fails (very likely the new_handler will
+//       just suicide printing a message).
+//     - Assume it did succeed if it returns, in which case reattempt the alloc.
+
+void* ShimCppNew(size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_function(chain_head, size);
+  } while (!ptr && CallNewHandler());
+  return ptr;
+}
+
+void ShimCppDelete(void* address) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, address);
+}
+
+void* ShimMalloc(size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_function(chain_head, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+  return ptr;
+}
+
+void* ShimCalloc(size_t n, size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+  return ptr;
+}
+
+void* ShimRealloc(void* address, size_t size) {
+  // realloc(size == 0) means free() and might return a nullptr. We should
+  // not call the std::new_handler in that case, though.
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->realloc_function(chain_head, address, size);
+  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler());
+  return ptr;
+}
+
+void* ShimMemalign(size_t alignment, size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+  return ptr;
+}
+
+int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
+  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
+  // in tc_malloc.cc.
+  if (((alignment % sizeof(void*)) != 0) ||
+      ((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
+    return EINVAL;
+  }
+  void* ptr = ShimMemalign(alignment, size);
+  *res = ptr;
+  return ptr ? 0 : ENOMEM;
+}
+
+void* ShimValloc(size_t size) {
+  return ShimMemalign(GetPageSize(), size);
+}
+
+void* ShimPvalloc(size_t size) {
+  // pvalloc(0) should allocate one page, according to its man page.
+  if (size == 0) {
+    size = GetPageSize();
+  } else {
+    size = (size + GetPageSize() - 1) & ~(GetPageSize() - 1);
+  }
+  return ShimMemalign(GetPageSize(), size);
+}
+
+void ShimFree(void* address) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, address);
+}
+
+}  // extern "C"
+
+// Cpp symbols (new / delete) should always be routed through the shim layer.
+#include "base/allocator/allocator_shim_override_cpp_symbols.h"
+
+// Android does not support symbol interposition. The way malloc symbols are
+// intercepted on Android is by using link-time -wrap flags.
+#if !defined(OS_ANDROID) && !defined(ANDROID)
+// Ditto for plain malloc() / calloc() / free() etc. symbols.
+#include "base/allocator/allocator_shim_override_libc_symbols.h"
+#else
+#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
+#endif
+
+// In the case of tcmalloc we also want to plumb into the glibc hooks
+// to avoid that allocations made in glibc itself (e.g., strdup()) get
+// accidentally performed on the glibc heap instead of the tcmalloc one.
+#if defined(USE_TCMALLOC)
+#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
+#endif
+
+// Cross-checks.
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#error The allocator shim should not be compiled when building for memory tools.
+#endif
+
+#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
+    (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS)
+#error This code cannot be used when exceptions are turned on.
+#endif
diff --git a/base/allocator/allocator_shim.h b/base/allocator/allocator_shim.h
new file mode 100644
index 0000000..f1a1e3d
--- /dev/null
+++ b/base/allocator/allocator_shim.h
@@ -0,0 +1,96 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace allocator {
+
+// Allocator Shim API. Allows to:
+//  - Configure the behavior of the allocator (what to do on OOM failures).
+//  - Install new hooks (AllocatorDispatch) in the allocator chain.
+
+// When this shim layer is enabled, the route of an allocation is as-follows:
+//
+// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
+//   The override_* headers define the symbols required to intercept calls to
+//   malloc() and operator new (if not overridden by specific C++ classes).
+//
+// [allocator_shim.cc] Routing allocation calls to the shim:
+//   The headers above route the calls to the internal ShimMalloc(), ShimFree(),
+//   ShimCppNew() etc. methods defined in allocator_shim.cc.
+//   These methods will: (1) forward the allocation call to the front of the
+//   AllocatorDispatch chain. (2) perform security hardenings (e.g., might
+//   call std::new_handler on OOM failure).
+//
+// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
+//   It is a singly linked list where each element is a struct with function
+//   pointers (|malloc_function|, |free_function|, etc). Normally the chain
+//   consists of a single AllocatorDispatch element, herein called
+//   the "default dispatch", which is statically defined at build time and
+//   ultimately routes the calls to the actual allocator defined by the build
+//   config (tcmalloc, glibc, ...).
+//
+// It is possible to dynamically insert further AllocatorDispatch stages
+// to the front of the chain, for debugging / profiling purposes.
+//
+// All the functions must be thread safe. The shim does not enforce any
+// serialization. This is to route to thread-aware allocators (e.g, tcmalloc)
+// without introducing unnecessary perf hits.
+
+struct AllocatorDispatch {
+  using AllocFn = void*(const AllocatorDispatch* self, size_t size);
+  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
+                                       size_t n,
+                                       size_t size);
+  using AllocAlignedFn = void*(const AllocatorDispatch* self,
+                               size_t alignment,
+                               size_t size);
+  using ReallocFn = void*(const AllocatorDispatch* self,
+                          void* address,
+                          size_t size);
+  using FreeFn = void(const AllocatorDispatch* self, void* address);
+
+  AllocFn* const alloc_function;
+  AllocZeroInitializedFn* const alloc_zero_initialized_function;
+  AllocAlignedFn* const alloc_aligned_function;
+  ReallocFn* const realloc_function;
+  FreeFn* const free_function;
+
+  const AllocatorDispatch* next;
+
+  // |default_dispatch| is statically defined by one (and only one) of the
+  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
+  // configuration.
+  static const AllocatorDispatch default_dispatch;
+};
+
+// When true makes malloc behave like new, w.r.t calling the new_handler if
+// the allocation fails (see set_new_mode() in Windows).
+BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
+
+// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
+// regardless of SetCallNewHandlerOnMallocFailure().
+BASE_EXPORT void* UncheckedAlloc(size_t size);
+
+// Inserts |dispatch| in front of the allocator chain. This method is NOT
+// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
+// The callers have the responsibility of linearizing the changes to the chain
+// (or more likely call these always on the same thread).
+BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
+
+// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
+// removal of arbitrary elements from a singly linked list would require a lock
+// in malloc(), which we really don't want.
+BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
diff --git a/base/allocator/allocator_shim_default_dispatch_to_glibc.cc b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
new file mode 100644
index 0000000..02facba
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to libc functions.
+// The code here is strongly inspired from tcmalloc's libc_override_glibc.h.
+
+extern "C" {
+void* __libc_malloc(size_t size);
+void* __libc_calloc(size_t n, size_t size);
+void* __libc_realloc(void* address, size_t size);
+void* __libc_memalign(size_t alignment, size_t size);
+void __libc_free(void* ptr);
+}  // extern "C"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* GlibcMalloc(const AllocatorDispatch*, size_t size) {
+  return __libc_malloc(size);
+}
+
+void* GlibcCalloc(const AllocatorDispatch*, size_t n, size_t size) {
+  return __libc_calloc(n, size);
+}
+
+void* GlibcRealloc(const AllocatorDispatch*, void* address, size_t size) {
+  return __libc_realloc(address, size);
+}
+
+void* GlibcMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
+  return __libc_memalign(alignment, size);
+}
+
+void GlibcFree(const AllocatorDispatch*, void* address) {
+  __libc_free(address);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &GlibcMalloc,   /* alloc_function */
+    &GlibcCalloc,   /* alloc_zero_initialized_function */
+    &GlibcMemalign, /* alloc_aligned_function */
+    &GlibcRealloc,  /* realloc_function */
+    &GlibcFree,     /* free_function */
+    nullptr,        /* next */
+};
diff --git a/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
new file mode 100644
index 0000000..7955cb7
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to the original libc functions when using the link-time
+// -Wl,-wrap,malloc approach (see README.md).
+// The __real_X functions here are special symbols that the linker will relocate
+// against the real "X" undefined symbol, so that __real_malloc becomes the
+// equivalent of what an undefined malloc symbol reference would have been.
+// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
+// which routes the __wrap_X functions into the shim.
+
+extern "C" {
+void* __real_malloc(size_t);
+void* __real_calloc(size_t, size_t);
+void* __real_realloc(void*, size_t);
+void* __real_memalign(size_t, size_t);
+void* __real_free(void*);
+}  // extern "C"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* RealMalloc(const AllocatorDispatch*, size_t size) {
+  return __real_malloc(size);
+}
+
+void* RealCalloc(const AllocatorDispatch*, size_t n, size_t size) {
+  return __real_calloc(n, size);
+}
+
+void* RealRealloc(const AllocatorDispatch*, void* address, size_t size) {
+  return __real_realloc(address, size);
+}
+
+void* RealMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
+  return __real_memalign(alignment, size);
+}
+
+void RealFree(const AllocatorDispatch*, void* address) {
+  __real_free(address);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &RealMalloc,   /* alloc_function */
+    &RealCalloc,   /* alloc_zero_initialized_function */
+    &RealMemalign, /* alloc_aligned_function */
+    &RealRealloc,  /* realloc_function */
+    &RealFree,     /* free_function */
+    nullptr,       /* next */
+};
diff --git a/base/allocator/allocator_shim_internals.h b/base/allocator/allocator_shim_internals.h
new file mode 100644
index 0000000..fc3624c
--- /dev/null
+++ b/base/allocator/allocator_shim_internals.h
@@ -0,0 +1,27 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
+
+#if defined(__GNUC__)
+
+#include <sys/cdefs.h>  // for __THROW
+
+#ifndef __THROW  // Not a glibc system
+#ifdef _NOEXCEPT  // LLVM libc++ uses noexcept instead
+#define __THROW _NOEXCEPT
+#else
+#define __THROW
+#endif  // !_NOEXCEPT
+#endif
+
+// Shim layer symbols need to be ALWAYS exported, regardless of component build.
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
+
+#define SHIM_ALIAS_SYMBOL(fn) __attribute__((alias(#fn)))
+
+#endif  // __GNUC__
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
diff --git a/base/allocator/allocator_shim_override_cpp_symbols.h b/base/allocator/allocator_shim_override_cpp_symbols.h
new file mode 100644
index 0000000..616716f
--- /dev/null
+++ b/base/allocator/allocator_shim_override_cpp_symbols.h
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+
+// Alias the default new/delete C++ symbols to the shim entry points.
+// This file is strongly inspired by tcmalloc's libc_override_redefine.h.
+
+#include <new>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+SHIM_ALWAYS_EXPORT void* operator new(size_t size)
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size)
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void* operator new(size_t size,
+                                      const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
+                                        const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p,
+                                          const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
diff --git a/base/allocator/allocator_shim_override_libc_symbols.h b/base/allocator/allocator_shim_override_libc_symbols.h
new file mode 100644
index 0000000..37b3b4eb
--- /dev/null
+++ b/base/allocator/allocator_shim_override_libc_symbols.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Its purpose is to SHIM_ALIAS_SYMBOL the Libc symbols for malloc/new to the
+// shim layer entry points.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+
+#include <malloc.h>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimMalloc);
+
+SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW
+    SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimRealloc);
+
+SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCalloc);
+
+SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW
+    SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW
+    SHIM_ALIAS_SYMBOL(ShimMemalign);
+
+SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimValloc);
+
+SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimPvalloc);
+
+SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW
+    SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
+
+// The default dispatch translation unit has to define also the following
+// symbols (unless they are ultimately routed to the system symbols):
+//   void malloc_stats(void);
+//   int mallopt(int, int);
+//   struct mallinfo mallinfo(void);
+//   size_t malloc_size(void*);
+//   size_t malloc_usable_size(const void*);
+
+}  // extern "C"
diff --git a/base/allocator/allocator_shim_override_linker_wrapped_symbols.h b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
new file mode 100644
index 0000000..5b85d6e
--- /dev/null
+++ b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
@@ -0,0 +1,44 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+
+// This header overrides the __wrap_X symbols when using the link-time
+// -Wl,-wrap,malloc shim-layer approach (see README.md).
+// All references to malloc, free, etc. within the linker unit that gets the
+// -wrap linker flags (e.g., libchrome.so) will be rewritten by the
+// linker as references to __wrap_malloc, __wrap_free, which are defined here.
+
+#include "base/allocator/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimCalloc);
+
+SHIM_ALWAYS_EXPORT void __wrap_free(void*)
+    SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimMalloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimMemalign);
+
+SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void**, size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
+
+SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimPvalloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_realloc(void*, size_t)
+    SHIM_ALIAS_SYMBOL(ShimRealloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimValloc);
+
+}  // extern "C"
diff --git a/base/allocator/features.h b/base/allocator/features.h
new file mode 100644
index 0000000..eedb0b6
--- /dev/null
+++ b/base/allocator/features.h
@@ -0,0 +1,15 @@
+// Generated by build/write_buildflag_header.py
+// From "allocator_features"
+
+#ifndef BASE_ALLOCATOR_FEATURES_H_
+#define BASE_ALLOCATOR_FEATURES_H_
+
+#include "build/buildflag.h"
+
+#if defined(__APPLE__)
+#define BUILDFLAG_INTERNAL_USE_EXPERIMENTAL_ALLOCATOR_SHIM() (0)
+#else
+#define BUILDFLAG_INTERNAL_USE_EXPERIMENTAL_ALLOCATOR_SHIM() (1)
+#endif
+
+#endif  // BASE_ALLOCATOR_FEATURES_H_
diff --git a/base/allocator/prep_libc.py b/base/allocator/prep_libc.py
deleted file mode 100755
index a88d3bd..0000000
--- a/base/allocator/prep_libc.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# This script takes libcmt.lib for VS2013 and removes the allocation related
-# functions from it.
-#
-# Usage: prep_libc.py <VCLibDir> <OutputDir> <arch> [<environment_file>]
-#
-# VCLibDir is the path where VC is installed, something like:
-#    C:\Program Files\Microsoft Visual Studio 8\VC\lib
-#
-# OutputDir is the directory where the modified libcmt file should be stored.
-# arch is one of: 'ia32', 'x86' or 'x64'. ia32 and x86 are synonyms.
-#
-# If the environment_file argument is set, the environment variables in the
-# given file will be used to execute the VC tools. This file is in the same
-# format as the environment block passed to CreateProcess.
-
-import os
-import shutil
-import subprocess
-import sys
-
-def run(command, env_dict):
-  """Run |command|.  If any lines that match an error condition then
-      terminate.
-
-  The env_dict, will be used for the environment. None can be used to get the
-  default environment."""
-  error = 'cannot find member object'
-  # Need shell=True to search the path in env_dict for the executable.
-  popen = subprocess.Popen(
-      command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
-      env=env_dict)
-  out, _ = popen.communicate()
-  for line in out.splitlines():
-    print line
-    if error and line.find(error) != -1:
-      print 'prep_libc.py: Error stripping object from C runtime.'
-      sys.exit(1)
-
-def main():
-  bindir = 'SELF_X86'
-  objdir = 'INTEL'
-  vs_install_dir = sys.argv[1]
-  outdir = sys.argv[2]
-  if "x64" in sys.argv[3]:
-    bindir = 'SELF_64_amd64'
-    objdir = 'amd64'
-    vs_install_dir = os.path.join(vs_install_dir, 'amd64')
-
-  if len(sys.argv) == 5:
-    env_pairs = open(sys.argv[4]).read()[:-2].split('\0')
-    env_dict = dict([item.split('=', 1) for item in env_pairs])
-  else:
-    env_dict = None  # Use the default environment.
-
-  output_lib = os.path.join(outdir, 'libcmt.lib')
-  shutil.copyfile(os.path.join(vs_install_dir, 'libcmt.lib'), output_lib)
-  shutil.copyfile(os.path.join(vs_install_dir, 'libcmt.pdb'),
-                  os.path.join(outdir, 'libcmt.pdb'))
-  cvspath = 'f:\\binaries\\Intermediate\\vctools\\crt_bld\\' + bindir + \
-      '\\crt\\prebuild\\build\\' + objdir + '\\mt_obj\\nativec\\\\';
-  cppvspath = 'f:\\binaries\\Intermediate\\vctools\\crt_bld\\' + bindir + \
-      '\\crt\\prebuild\\build\\' + objdir + '\\mt_obj\\nativecpp\\\\';
-
-  cobjfiles = ['malloc', 'free', 'realloc', 'heapinit', 'calloc', 'recalloc',
-      'calloc_impl']
-  cppobjfiles = ['new', 'new2', 'delete', 'delete2', 'new_mode', 'newopnt',
-      'newaopnt']
-  for obj in cobjfiles:
-    cmd = ('lib /nologo /ignore:4006,4221 /remove:%s%s.obj %s' %
-           (cvspath, obj, output_lib))
-    run(cmd, env_dict)
-  for obj in cppobjfiles:
-    cmd = ('lib /nologo /ignore:4006,4221 /remove:%s%s.obj %s' %
-           (cppvspath, obj, output_lib))
-    run(cmd, env_dict)
-
-if __name__ == "__main__":
-  sys.exit(main())
diff --git a/base/at_exit.cc b/base/at_exit.cc
index 0fba355..cfe4cf9 100644
--- a/base/at_exit.cc
+++ b/base/at_exit.cc
@@ -6,6 +6,7 @@
 
 #include <stddef.h>
 #include <ostream>
+#include <utility>
 
 #include "base/bind.h"
 #include "base/callback.h"
@@ -21,7 +22,8 @@
 // this for thread-safe access, since it will only be modified in testing.
 static AtExitManager* g_top_manager = NULL;
 
-AtExitManager::AtExitManager() : next_manager_(g_top_manager) {
+AtExitManager::AtExitManager()
+    : processing_callbacks_(false), next_manager_(g_top_manager) {
 // If multiple modules instantiate AtExitManagers they'll end up living in this
 // module... they have to coexist.
 #if !defined(COMPONENT_BUILD)
@@ -55,7 +57,8 @@
   }
 
   AutoLock lock(g_top_manager->lock_);
-  g_top_manager->stack_.push(task);
+  DCHECK(!g_top_manager->processing_callbacks_);
+  g_top_manager->stack_.push(std::move(task));
 }
 
 // static
@@ -65,16 +68,28 @@
     return;
   }
 
-  AutoLock lock(g_top_manager->lock_);
-
-  while (!g_top_manager->stack_.empty()) {
-    base::Closure task = g_top_manager->stack_.top();
-    task.Run();
-    g_top_manager->stack_.pop();
+  // Callbacks may try to add new callbacks, so run them without holding
+  // |lock_|. This is an error and caught by the DCHECK in RegisterTask(), but
+  // handle it gracefully in release builds so we don't deadlock.
+  std::stack<base::Closure> tasks;
+  {
+    AutoLock lock(g_top_manager->lock_);
+    tasks.swap(g_top_manager->stack_);
+    g_top_manager->processing_callbacks_ = true;
   }
+
+  while (!tasks.empty()) {
+    base::Closure task = tasks.top();
+    task.Run();
+    tasks.pop();
+  }
+
+  // Expect that all callbacks have been run.
+  DCHECK(g_top_manager->stack_.empty());
 }
 
-AtExitManager::AtExitManager(bool shadow) : next_manager_(g_top_manager) {
+AtExitManager::AtExitManager(bool shadow)
+    : processing_callbacks_(false), next_manager_(g_top_manager) {
   DCHECK(shadow || !g_top_manager);
   g_top_manager = this;
 }
diff --git a/base/at_exit.h b/base/at_exit.h
index 04e3f76..02e18ed 100644
--- a/base/at_exit.h
+++ b/base/at_exit.h
@@ -59,6 +59,7 @@
  private:
   base::Lock lock_;
   std::stack<base::Closure> stack_;
+  bool processing_callbacks_;
   AtExitManager* next_manager_;  // Stack of managers to allow shadowing.
 
   DISALLOW_COPY_AND_ASSIGN(AtExitManager);
diff --git a/base/base.gyp b/base/base.gyp
index dc484f4..a534d5c 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -21,8 +21,12 @@
         'optimize': 'max',
       },
       'dependencies': [
+        'allocator/allocator.gyp:allocator',
+        'allocator/allocator.gyp:allocator_features#target',
         'base_debugging_flags#target',
+        'base_win_features#target',
         'base_static',
+        'base_build_date#target',
         '../testing/gtest.gyp:gtest_prod',
         '../third_party/modp_b64/modp_b64.gyp:modp_b64',
         'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
@@ -137,6 +141,14 @@
             }],
           ],
         }],
+        ['use_sysroot==0 and (OS == "android" or OS == "linux")', {
+          'link_settings': {
+            'libraries': [
+              # Needed for <atomic> when building with newer C++ library.
+              '-latomic',
+            ],
+          },
+        }],
         ['OS == "win"', {
           # Specify delayload for base.dll.
           'msvs_settings': {
@@ -150,6 +162,8 @@
                 'cfgmgr32.lib',
                 'powrprof.lib',
                 'setupapi.lib',
+                'userenv.lib',
+                'winmm.lib',
               ],
             },
           },
@@ -166,18 +180,12 @@
                   'cfgmgr32.lib',
                   'powrprof.lib',
                   'setupapi.lib',
+                  'userenv.lib',
+                  'winmm.lib',
                 ],
               },
             },
           },
-          'copies': [
-            {
-              'destination': '<(PRODUCT_DIR)/',
-              'files': [
-                '../build/win/dbghelp_xp/dbghelp.dll',
-              ],
-            },
-          ],
           'dependencies': [
            'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
           ],
@@ -192,6 +200,7 @@
               '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
               '$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
               '$(SDKROOT)/System/Library/Frameworks/Security.framework',
+              '$(SDKROOT)/usr/lib/libbsm.dylib',
             ],
           },
         }],
@@ -224,6 +233,9 @@
             'sync_socket_posix.cc',
           ]
         }],
+        ['use_experimental_allocator_shim==1', {
+          'dependencies': [ 'allocator/allocator.gyp:unified_allocator_shim']
+        }],
       ],
       'sources': [
         'auto_reset.h',
@@ -311,82 +323,6 @@
       ],
     },
     {
-      'target_name': 'base_prefs',
-      'type': '<(component)',
-      'variables': {
-        'enable_wexit_time_destructors': 1,
-        'optimize': 'max',
-      },
-      'dependencies': [
-        'base',
-      ],
-      'export_dependent_settings': [
-        'base',
-      ],
-      'defines': [
-        'BASE_PREFS_IMPLEMENTATION',
-      ],
-      'sources': [
-        'prefs/base_prefs_export.h',
-        'prefs/default_pref_store.cc',
-        'prefs/default_pref_store.h',
-        'prefs/json_pref_store.cc',
-        'prefs/json_pref_store.h',
-        'prefs/overlay_user_pref_store.cc',
-        'prefs/overlay_user_pref_store.h',
-        'prefs/persistent_pref_store.h',
-        'prefs/pref_change_registrar.cc',
-        'prefs/pref_change_registrar.h',
-        'prefs/pref_filter.h',
-        'prefs/pref_member.cc',
-        'prefs/pref_member.h',
-        'prefs/pref_notifier.h',
-        'prefs/pref_notifier_impl.cc',
-        'prefs/pref_notifier_impl.h',
-        'prefs/pref_observer.h',
-        'prefs/pref_registry.cc',
-        'prefs/pref_registry.h',
-        'prefs/pref_registry_simple.cc',
-        'prefs/pref_registry_simple.h',
-        'prefs/pref_service.cc',
-        'prefs/pref_service.h',
-        'prefs/pref_service_factory.cc',
-        'prefs/pref_service_factory.h',
-        'prefs/pref_store.cc',
-        'prefs/pref_store.h',
-        'prefs/pref_value_map.cc',
-        'prefs/pref_value_map.h',
-        'prefs/pref_value_store.cc',
-        'prefs/pref_value_store.h',
-        'prefs/scoped_user_pref_update.cc',
-        'prefs/scoped_user_pref_update.h',
-        'prefs/value_map_pref_store.cc',
-        'prefs/value_map_pref_store.h',
-        'prefs/writeable_pref_store.h',
-      ],
-      'includes': [
-        '../build/android/increase_size_for_speed.gypi',
-      ],
-    },
-    {
-      'target_name': 'base_prefs_test_support',
-      'type': 'static_library',
-      'dependencies': [
-        'base',
-        'base_prefs',
-        '../testing/gmock.gyp:gmock',
-      ],
-      'sources': [
-        'prefs/mock_pref_change_callback.cc',
-        'prefs/pref_store_observer_mock.cc',
-        'prefs/pref_store_observer_mock.h',
-        'prefs/testing_pref_service.cc',
-        'prefs/testing_pref_service.h',
-        'prefs/testing_pref_store.cc',
-        'prefs/testing_pref_store.h',
-      ],
-    },
-    {
       # This is the subset of files from base that should not be used with a
       # dynamic library. Note that this library cannot depend on base because
       # base depends on base_static.
@@ -444,6 +380,7 @@
         'big_endian_unittest.cc',
         'bind_unittest.cc',
         'bind_unittest.nc',
+        'bit_cast_unittest.cc',
         'bits_unittest.cc',
         'build_time_unittest.cc',
         'callback_helpers_unittest.cc',
@@ -470,7 +407,7 @@
         'deferred_sequenced_task_runner_unittest.cc',
         'environment_unittest.cc',
         'feature_list_unittest.cc',
-        'file_version_info_unittest.cc',
+        'file_version_info_win_unittest.cc',
         'files/dir_reader_posix_unittest.cc',
         'files/file_locking_unittest.cc',
         'files/file_path_unittest.cc',
@@ -513,8 +450,8 @@
         'mac/call_with_eh_frame_unittest.mm',
         'mac/dispatch_source_mach_unittest.cc',
         'mac/foundation_util_unittest.mm',
-        'mac/libdispatch_task_runner_unittest.cc',
         'mac/mac_util_unittest.mm',
+        'mac/mach_port_broker_unittest.cc',
         'mac/objc_property_releaser_unittest.mm',
         'mac/scoped_nsobject_unittest.mm',
         'mac/scoped_objc_class_swizzler_unittest.mm',
@@ -530,11 +467,10 @@
         'memory/ptr_util_unittest.cc',
         'memory/ref_counted_memory_unittest.cc',
         'memory/ref_counted_unittest.cc',
-        'memory/scoped_ptr_unittest.cc',
-        'memory/scoped_ptr_unittest.nc',
         'memory/scoped_vector_unittest.cc',
-        'memory/shared_memory_unittest.cc',
         'memory/shared_memory_mac_unittest.cc',
+        'memory/shared_memory_unittest.cc',
+        'memory/shared_memory_win_unittest.cc',
         'memory/singleton_unittest.cc',
         'memory/weak_ptr_unittest.cc',
         'memory/weak_ptr_unittest.nc',
@@ -551,6 +487,9 @@
         'metrics/histogram_snapshot_manager_unittest.cc',
         'metrics/histogram_unittest.cc',
         'metrics/metrics_hashes_unittest.cc',
+        'metrics/persistent_histogram_allocator_unittest.cc',
+        'metrics/persistent_memory_allocator_unittest.cc',
+        'metrics/persistent_sample_map_unittest.cc',
         'metrics/sample_map_unittest.cc',
         'metrics/sample_vector_unittest.cc',
         'metrics/sparse_histogram_unittest.cc',
@@ -558,23 +497,13 @@
         'native_library_unittest.cc',
         'numerics/safe_numerics_unittest.cc',
         'observer_list_unittest.cc',
+        'optional_unittest.cc',
         'os_compat_android_unittest.cc',
         'path_service_unittest.cc',
         'pickle_unittest.cc',
         'posix/file_descriptor_shuffle_unittest.cc',
         'posix/unix_domain_socket_linux_unittest.cc',
         'power_monitor/power_monitor_unittest.cc',
-        'prefs/default_pref_store_unittest.cc',
-        'prefs/json_pref_store_unittest.cc',
-        'prefs/mock_pref_change_callback.h',
-        'prefs/overlay_user_pref_store_unittest.cc',
-        'prefs/pref_change_registrar_unittest.cc',
-        'prefs/pref_member_unittest.cc',
-        'prefs/pref_notifier_impl_unittest.cc',
-        'prefs/pref_service_unittest.cc',
-        'prefs/pref_value_map_unittest.cc',
-        'prefs/pref_value_store_unittest.cc',
-        'prefs/scoped_user_pref_update_unittest.cc',
         'process/memory_unittest.cc',
         'process/memory_unittest_mac.h',
         'process/memory_unittest_mac.mm',
@@ -585,6 +514,7 @@
         'profiler/stack_sampling_profiler_unittest.cc',
         'profiler/tracked_time_unittest.cc',
         'rand_util_unittest.cc',
+        'run_loop_unittest.cc',
         'scoped_clear_errno_unittest.cc',
         'scoped_generic_unittest.cc',
         'scoped_native_library_unittest.cc',
@@ -612,12 +542,28 @@
         'synchronization/cancellation_flag_unittest.cc',
         'synchronization/condition_variable_unittest.cc',
         'synchronization/lock_unittest.cc',
+        'synchronization/read_write_lock_unittest.cc',
         'synchronization/waitable_event_unittest.cc',
         'synchronization/waitable_event_watcher_unittest.cc',
+        'sys_byteorder_unittest.cc',
         'sys_info_unittest.cc',
         'system_monitor/system_monitor_unittest.cc',
         'task/cancelable_task_tracker_unittest.cc',
         'task_runner_util_unittest.cc',
+        'task_scheduler/delayed_task_manager_unittest.cc',
+        'task_scheduler/priority_queue_unittest.cc',
+        'task_scheduler/scheduler_lock_unittest.cc',
+        'task_scheduler/scheduler_service_thread_unittest.cc',
+        'task_scheduler/scheduler_worker_unittest.cc',
+        'task_scheduler/scheduler_worker_pool_impl_unittest.cc',
+        'task_scheduler/scheduler_worker_stack_unittest.cc',
+        'task_scheduler/sequence_sort_key_unittest.cc',
+        'task_scheduler/sequence_unittest.cc',
+        'task_scheduler/task_scheduler_impl_unittest.cc',
+        'task_scheduler/task_tracker_unittest.cc',
+        'task_scheduler/test_task_factory.cc',
+        'task_scheduler/test_task_factory.h',
+        'task_scheduler/test_utils.h',
         'template_util_unittest.cc',
         'test/histogram_tester_unittest.cc',
         'test/test_pending_task_unittest.cc',
@@ -668,7 +614,9 @@
         'win/scoped_variant_unittest.cc',
         'win/shortcut_unittest.cc',
         'win/startup_information_unittest.cc',
+        'win/wait_chain_unittest.cc',
         'win/win_util_unittest.cc',
+        'win/windows_version_unittest.cc',
         'win/wrapped_window_proc_unittest.cc',
         '<@(trace_event_test_sources)',
       ],
@@ -676,8 +624,6 @@
         'base',
         'base_i18n',
         'base_message_loop_tests',
-        'base_prefs',
-        'base_prefs_test_support',
         'base_static',
         'run_all_unittests',
         'test_support_base',
@@ -693,6 +639,17 @@
         'module_dir': 'base'
       },
       'conditions': [
+        ['cfi_vptr==1 and cfi_cast==1', {
+          'defines': [
+             # TODO(krasin): remove CFI_CAST_CHECK, see https://crbug.com/626794.
+            'CFI_CAST_CHECK',
+          ],
+        }],
+        ['OS == "ios" or OS == "mac"', {
+          'dependencies': [
+            'base_unittests_arc',
+          ],
+        }],
         ['OS == "android"', {
           'dependencies': [
             'android/jni_generator/jni_generator.gyp:jni_generator_tests',
@@ -732,9 +689,6 @@
           'defines': [
             'USE_SYMBOLIZE',
           ],
-          'sources!': [
-            'file_version_info_unittest.cc',
-          ],
           'conditions': [
             [ 'desktop_linux==1', {
               'sources': [
@@ -761,14 +715,7 @@
           'dependencies': [
             'malloc_wrapper',
           ],
-          'conditions': [
-            ['use_allocator!="none"', {
-              'dependencies': [
-                'allocator/allocator.gyp:allocator',
-              ],
-            }],
-          ]},
-        ],
+        }],
         [ 'OS == "win" and target_arch == "x64"', {
           'sources': [
             'profiler/win32_stack_frame_unwinder_unittest.cc',
@@ -778,6 +725,9 @@
           ],
         }],
         ['OS == "win"', {
+          'dependencies': [
+            'scoped_handle_test_dll'
+          ],
           'sources!': [
             'file_descriptor_shuffle_unittest.cc',
             'files/dir_reader_posix_unittest.cc',
@@ -789,16 +739,6 @@
             4267,
           ],
           'conditions': [
-            # This is needed so base_unittests uses the allocator shim, as
-            # SecurityTest.MemoryAllocationRestriction* tests are dependent
-            # on tcmalloc.
-            # TODO(wfh): crbug.com/246278 Move tcmalloc specific tests into
-            # their own test suite.
-            ['win_use_allocator_shim==1', {
-              'dependencies': [
-                'allocator/allocator.gyp:allocator',
-              ],
-            }],
             ['icu_use_data_file_flag==0', {
               # This is needed to trigger the dll copy step on windows.
               # TODO(mark): This should not be necessary.
@@ -812,6 +752,9 @@
             'third_party/libevent/libevent.gyp:libevent'
           ],
         }],
+        ['use_experimental_allocator_shim==1', {
+          'sources': [ 'allocator/allocator_shim_unittest.cc']
+        }],
       ],  # conditions
       'target_conditions': [
         ['OS == "ios" and _toolset != "host"', {
@@ -935,6 +878,8 @@
         'test/ios/wait_util.mm',
         'test/launcher/test_launcher.cc',
         'test/launcher/test_launcher.h',
+        'test/launcher/test_launcher_tracer.cc',
+        'test/launcher/test_launcher_tracer.h',
         'test/launcher/test_result.cc',
         'test/launcher/test_result.h',
         'test/launcher/test_results_tracker.cc',
@@ -965,6 +910,8 @@
         'test/perf_time_logger.h',
         'test/power_monitor_test_base.cc',
         'test/power_monitor_test_base.h',
+        'test/scoped_command_line.cc',
+        'test/scoped_command_line.h',
         'test/scoped_locale.cc',
         'test/scoped_locale.h',
         'test/scoped_path_override.cc',
@@ -992,6 +939,8 @@
         'test/test_io_thread.h',
         'test/test_listener_ios.h',
         'test/test_listener_ios.mm',
+        'test/test_message_loop.cc',
+        'test/test_message_loop.h',
         'test/test_mock_time_task_runner.cc',
         'test/test_mock_time_task_runner.h',
         'test/test_pending_task.cc',
@@ -1083,7 +1032,7 @@
     },
     {
       # GN version: //base/debug:debugging_flags
-      # Since this generates a file, it most only be referenced in the target
+      # Since this generates a file, it must only be referenced in the target
       # toolchain or there will be multiple rules that generate the header.
       # When referenced from a target that might be compiled in the host
       # toolchain, always refer to 'base_debugging_flags#target'.
@@ -1096,6 +1045,56 @@
         ],
       },
     },
+    {
+      # GN version: //base/win:base_win_features
+      # Since this generates a file, it must only be referenced in the target
+      # toolchain or there will be multiple rules that generate the header.
+      # When referenced from a target that might be compiled in the host
+      # toolchain, always refer to 'base_win_features#target'.
+      'target_name': 'base_win_features',
+      'conditions': [
+        ['OS=="win"', {
+          'includes': [ '../build/buildflag_header.gypi' ],
+          'variables': {
+            'buildflag_header_path': 'base/win/base_features.h',
+            'buildflag_flags': [
+              'SINGLE_MODULE_MODE_HANDLE_VERIFIER=<(single_module_mode_handle_verifier)',
+            ],
+          },
+        }, {
+          'type': 'none',
+        }],
+      ],
+    },
+    {
+      'type': 'none',
+      'target_name': 'base_build_date',
+      'hard_dependency': 1,
+      'actions': [{
+        'action_name': 'generate_build_date_headers',
+        'inputs': [
+          '<(DEPTH)/build/write_build_date_header.py',
+          '<(DEPTH)/build/util/LASTCHANGE'
+        ],
+        'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h' ],
+        'action': [
+          'python', '<(DEPTH)/build/write_build_date_header.py',
+          '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h',
+          '<(build_type)'
+        ]
+      }],
+      'conditions': [
+        [ 'buildtype == "Official"', {
+          'variables': {
+            'build_type': 'official'
+          }
+        }, {
+          'variables': {
+            'build_type': 'default'
+          }
+        }],
+      ]
+    },
   ],
   'conditions': [
     ['OS=="ios" and "<(GENERATOR)"=="ninja"', {
@@ -1151,6 +1150,7 @@
             'base_target': 1,
           },
           'dependencies': [
+            'base_build_date',
             'base_debugging_flags#target',
             'base_static_win64',
             '../third_party/modp_b64/modp_b64.gyp:modp_b64_win64',
@@ -1192,6 +1192,8 @@
                 'cfgmgr32.lib',
                 'powrprof.lib',
                 'setupapi.lib',
+                'userenv.lib',
+                'winmm.lib',
               ],
             },
           },
@@ -1208,6 +1210,8 @@
                   'cfgmgr32.lib',
                   'powrprof.lib',
                   'setupapi.lib',
+                  'userenv.lib',
+                  'winmm.lib',
                 ],
               },
             },
@@ -1413,6 +1417,7 @@
             'android/java/src/org/chromium/base/ApplicationStatus.java',
             'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
             'android/java/src/org/chromium/base/BuildInfo.java',
+            'android/java/src/org/chromium/base/Callback.java',
             'android/java/src/org/chromium/base/CommandLine.java',
             'android/java/src/org/chromium/base/ContentUriUtils.java',
             'android/java/src/org/chromium/base/ContextUtils.java',
@@ -1480,18 +1485,15 @@
           'includes': [ '../build/android/java_cpp_template.gypi' ],
         },
         {
-          # GN: //base:base_multidex_gen
-          'target_name': 'base_multidex_gen',
+          # GN: //base:base_build_config_gen
+          'target_name': 'base_build_config_gen',
           'type': 'none',
           'sources': [
-            'android/java/templates/ChromiumMultiDex.template',
+            'android/java/templates/BuildConfig.template',
           ],
           'variables': {
-            'package_name': 'org/chromium/base/multidex',
+            'package_name': 'org/chromium/base',
             'template_deps': [],
-            'additional_gcc_preprocess_options': [
-              '--defines', 'MULTIDEX_CONFIGURATION_<(CONFIGURATION_NAME)',
-            ],
           },
           'includes': ['../build/android/java_cpp_template.gypi'],
         },
@@ -1510,18 +1512,26 @@
           'type': 'none',
           'variables': {
             'java_in_dir': 'android/java',
-            'jar_excluded_classes': [ '*/NativeLibraries.class' ],
+            'jar_excluded_classes': [
+              '*/BuildConfig.class',
+              '*/NativeLibraries.class',
+            ],
           },
           'dependencies': [
             'base_java_application_state',
             'base_java_library_load_from_apk_status_codes',
             'base_java_library_process_type',
             'base_java_memory_pressure_level',
-            'base_multidex_gen',
+            'base_build_config_gen',
             'base_native_libraries_gen',
             '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
             '../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
           ],
+          'all_dependent_settings': {
+            'variables': {
+              'generate_build_config': 1,
+            },
+          },
           'includes': [ '../build/java.gypi' ],
         },
         {
@@ -1583,6 +1593,7 @@
           'target_name': 'base_junit_test_support',
           'type': 'none',
           'dependencies': [
+            'base_build_config_gen',
             '../testing/android/junit/junit_test.gyp:junit_test_support',
             '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
           ],
@@ -1604,13 +1615,21 @@
             '../testing/android/junit/junit_test.gyp:junit_test_support',
           ],
           'variables': {
-             'main_class': 'org.chromium.testing.local.JunitTestMain',
-             'src_paths': [
-               '../base/android/junit/',
-               '../base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java',
-             ],
-           },
-          'includes': [ '../build/host_jar.gypi' ],
+            'main_class': 'org.chromium.testing.local.JunitTestMain',
+            'src_paths': [
+              '../base/android/junit/',
+              '../base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java',
+            ],
+            'test_type': 'junit',
+            'wrapper_script_name': 'helper/<(_target_name)',
+          },
+          'includes': [
+            '../build/android/test_runner.gypi',
+            '../build/host_jar.gypi',
+          ],
         },
         {
           # GN: //base:base_javatests
@@ -1722,6 +1741,16 @@
             },
           },
         },
+        {
+          'target_name': 'scoped_handle_test_dll',
+          'type': 'loadable_module',
+          'dependencies': [
+            'base',
+          ],
+          'sources': [
+            'win/scoped_handle_test_dll.cc',
+          ],
+        },
       ],
     }],
     ['test_isolation_mode != "noop"', {
@@ -1741,5 +1770,32 @@
         },
       ],
     }],
+    ['OS == "ios" or OS == "mac"', {
+      'targets': [
+        {
+          'target_name': 'base_unittests_arc',
+          'type': 'static_library',
+          'dependencies': [
+            'base',
+            '../testing/gtest.gyp:gtest',
+          ],
+          'sources': [
+            'mac/bind_objc_block_unittest_arc.mm',
+            'mac/scoped_nsobject_unittest_arc.mm'
+          ],
+          'xcode_settings': {
+            'CLANG_ENABLE_OBJC_ARC': 'YES',
+          },
+          'target_conditions': [
+            ['OS == "ios" and _toolset != "host"', {
+              'sources/': [
+                ['include', 'mac/bind_objc_block_unittest_arc\\.mm$'],
+                ['include', 'mac/scoped_nsobject_unittest_arc\\.mm$'],
+              ],
+            }]
+          ],
+        },
+      ],
+    }],
   ],
 }
diff --git a/base/base.gypi b/base/base.gypi
index bb028bd..cb41e79 100644
--- a/base/base.gypi
+++ b/base/base.gypi
@@ -32,6 +32,8 @@
           'android/base_jni_registrar.h',
           'android/build_info.cc',
           'android/build_info.h',
+          'android/callback_android.cc',
+          'android/callback_android.h',
           'android/command_line_android.cc',
           'android/command_line_android.h',
           'android/content_uri_utils.cc',
@@ -119,7 +121,6 @@
           'bind_helpers.cc',
           'bind_helpers.h',
           'bind_internal.h',
-          'bind_internal_win.h',
           'bit_cast.h',
           'bits.h',
           'build_time.cc',
@@ -240,8 +241,6 @@
           'gtest_prod_util.h',
           'guid.cc',
           'guid.h',
-          'guid_posix.cc',
-          'guid_win.cc',
           'hash.cc',
           'hash.h',
           'id_map.h',
@@ -298,14 +297,16 @@
           'mac/launch_services_util.h',
           'mac/launchd.cc',
           'mac/launchd.h',
-          'mac/libdispatch_task_runner.cc',
-          'mac/libdispatch_task_runner.h',
-          'mac/mac_logging.cc',
           'mac/mac_logging.h',
+          'mac/mac_logging.mm',
           'mac/mac_util.h',
           'mac/mac_util.mm',
           'mac/mach_logging.cc',
           'mac/mach_logging.h',
+          'mac/mach_port_broker.h',
+          'mac/mach_port_broker.mm',
+          'mac/mach_port_util.cc',
+          'mac/mach_port_util.h',
           'mac/objc_property_releaser.h',
           'mac/objc_property_releaser.mm',
           'mac/os_crash_dumps.cc',
@@ -314,6 +315,7 @@
           'mac/scoped_authorizationref.h',
           'mac/scoped_block.h',
           'mac/scoped_cftyperef.h',
+          'mac/scoped_dispatch_object.h',
           'mac/scoped_ioobject.h',
           'mac/scoped_ioplugininterface.h',
           'mac/scoped_launch_data.h',
@@ -324,6 +326,7 @@
           'mac/scoped_nsautorelease_pool.h',
           'mac/scoped_nsautorelease_pool.mm',
           'mac/scoped_nsobject.h',
+          'mac/scoped_nsobject.mm',
           'mac/scoped_objc_class_swizzler.h',
           'mac/scoped_objc_class_swizzler.mm',
           'mac/scoped_sending_event.h',
@@ -342,6 +345,7 @@
           'memory/discardable_memory_allocator.h',
           'memory/discardable_shared_memory.cc',
           'memory/discardable_shared_memory.h',
+          'memory/free_deleter.h',
           'memory/linked_ptr.h',
           'memory/manual_constructor.h',
           'memory/memory_pressure_listener.cc',
@@ -362,7 +366,6 @@
           'memory/ref_counted_memory.cc',
           'memory/ref_counted_memory.h',
           'memory/scoped_policy.h',
-          'memory/scoped_ptr.h',
           'memory/scoped_vector.h',
           'memory/shared_memory.h',
           'memory/shared_memory_android.cc',
@@ -408,6 +411,12 @@
           'metrics/histogram_snapshot_manager.h',
           'metrics/metrics_hashes.cc',
           'metrics/metrics_hashes.h',
+          'metrics/persistent_histogram_allocator.cc',
+          'metrics/persistent_histogram_allocator.h',
+          'metrics/persistent_memory_allocator.cc',
+          'metrics/persistent_memory_allocator.h',
+          'metrics/persistent_sample_map.cc',
+          'metrics/persistent_sample_map.h',
           'metrics/sample_map.cc',
           'metrics/sample_map.h',
           'metrics/sample_vector.cc',
@@ -419,7 +428,6 @@
           'metrics/user_metrics.cc',
           'metrics/user_metrics.h',
           'metrics/user_metrics_action.h',
-          'move.h',
           'native_library.h',
           'native_library_ios.mm',
           'native_library_mac.mm',
@@ -435,6 +443,7 @@
           'numerics/safe_math_impl.h',
           'observer_list.h',
           'observer_list_threadsafe.h',
+          'optional.h',
           'os_compat_android.cc',
           'os_compat_android.h',
           'os_compat_nacl.cc',
@@ -518,8 +527,6 @@
           'process/process_metrics_win.cc',
           'process/process_posix.cc',
           'process/process_win.cc',
-          'profiler/alternate_timer.cc',
-          'profiler/alternate_timer.h',
           'profiler/native_stack_sampler.cc',
           'profiler/native_stack_sampler.h',
           'profiler/native_stack_sampler_posix.cc',
@@ -549,9 +556,8 @@
           'sequenced_task_runner.cc',
           'sequenced_task_runner.h',
           'sequenced_task_runner_helpers.h',
+          'sha1.cc',
           'sha1.h',
-          'sha1_portable.cc',
-          'sha1_win.cc',
           'single_thread_task_runner.h',
           'stl_util.h',
           'strings/latin1_string_conversions.cc',
@@ -601,6 +607,10 @@
           'synchronization/lock_impl.h',
           'synchronization/lock_impl_posix.cc',
           'synchronization/lock_impl_win.cc',
+          'synchronization/read_write_lock.h',
+          'synchronization/read_write_lock_nacl.cc',
+          'synchronization/read_write_lock_posix.cc',
+          'synchronization/read_write_lock_win.cc',
           'synchronization/spin_wait.h',
           'synchronization/waitable_event.h',
           'synchronization/waitable_event_posix.cc',
@@ -617,7 +627,7 @@
           'sys_info_internal.h',
           'sys_info_ios.mm',
           'sys_info_linux.cc',
-          'sys_info_mac.cc',
+          'sys_info_mac.mm',
           'sys_info_openbsd.cc',
           'sys_info_posix.cc',
           'sys_info_win.cc',
@@ -628,6 +638,36 @@
           'task_runner.cc',
           'task_runner.h',
           'task_runner_util.h',
+          'task_scheduler/delayed_task_manager.cc',
+          'task_scheduler/delayed_task_manager.h',
+          'task_scheduler/priority_queue.cc',
+          'task_scheduler/priority_queue.h',
+          'task_scheduler/scheduler_lock.h',
+          'task_scheduler/scheduler_lock_impl.cc',
+          'task_scheduler/scheduler_lock_impl.h',
+          'task_scheduler/scheduler_service_thread.cc',
+          'task_scheduler/scheduler_service_thread.h',
+          'task_scheduler/scheduler_worker.cc',
+          'task_scheduler/scheduler_worker.h',
+          'task_scheduler/scheduler_worker_pool.h',
+          'task_scheduler/scheduler_worker_pool_impl.cc',
+          'task_scheduler/scheduler_worker_pool_impl.h',
+          'task_scheduler/scheduler_worker_stack.cc',
+          'task_scheduler/scheduler_worker_stack.h',
+          'task_scheduler/sequence.cc',
+          'task_scheduler/sequence.h',
+          'task_scheduler/sequence_sort_key.cc',
+          'task_scheduler/sequence_sort_key.h',
+          'task_scheduler/task.cc',
+          'task_scheduler/task.h',
+          'task_scheduler/task_scheduler.cc',
+          'task_scheduler/task_scheduler.h',
+          'task_scheduler/task_scheduler_impl.cc',
+          'task_scheduler/task_scheduler_impl.h',
+          'task_scheduler/task_tracker.cc',
+          'task_scheduler/task_tracker.h',
+          'task_scheduler/task_traits.cc',
+          'task_scheduler/task_traits.h',
           'template_util.h',
           'third_party/dmg_fp/dmg_fp.h',
           'third_party/dmg_fp/dtoa_wrapper.cc',
@@ -638,8 +678,6 @@
           'third_party/nspr/prtime.h',
           'third_party/superfasthash/superfasthash.c',
           'third_party/xdg_mime/xdgmime.h',
-          'thread_task_runner_handle.cc',
-          'thread_task_runner_handle.h',
           'threading/non_thread_safe.h',
           'threading/non_thread_safe_impl.cc',
           'threading/non_thread_safe_impl.h',
@@ -678,6 +716,8 @@
           'threading/thread_local_win.cc',
           'threading/thread_restrictions.cc',
           'threading/thread_restrictions.h',
+          'threading/thread_task_runner_handle.cc',
+          'threading/thread_task_runner_handle.h',
           'threading/watchdog.cc',
           'threading/watchdog.h',
           'threading/worker_pool.cc',
@@ -763,6 +803,8 @@
           'win/shortcut.h',
           'win/startup_information.cc',
           'win/startup_information.h',
+          'win/wait_chain.cc',
+          'win/wait_chain.h',
           'win/win_util.cc',
           'win/win_util.h',
           'win/windows_version.cc',
@@ -836,6 +878,7 @@
                'process/process_posix.cc',
                'rand_util_posix.cc',
                'scoped_native_library.cc',
+               'synchronization/read_write_lock_posix.cc',
                'sys_info.cc',
                'sys_info_posix.cc',
                'third_party/dynamic_annotations/dynamic_annotations.c',
@@ -893,6 +936,7 @@
               ['include', '^mac/mac_logging\\.'],
               ['include', '^mac/mach_logging\\.'],
               ['include', '^mac/objc_property_releaser\\.'],
+              ['include', '^mac/scoped_block\\.'],
               ['include', '^mac/scoped_mach_port\\.'],
               ['include', '^mac/scoped_mach_vm\\.'],
               ['include', '^mac/scoped_nsautorelease_pool\\.'],
@@ -914,6 +958,7 @@
               # Exclude unsupported features on iOS.
               ['exclude', '^files/file_path_watcher.*'],
               ['exclude', '^threading/platform_thread_internal_posix\\.(h|cc)'],
+              ['exclude', '^trace_event/malloc_dump_provider\\.(h|cc)$'],
             ],
             'sources': [
               'process/memory_stubs.cc',
@@ -960,9 +1005,6 @@
               'files/file_path_watcher_stub.cc',
               'message_loop/message_pump_libevent.cc',
               'posix/file_descriptor_shuffle.cc',
-              # Not using sha1_win.cc because it may have caused a
-              # regression to page cycler moz.
-              'sha1_win.cc',
               'strings/string16.cc',
             ],
           },],
diff --git a/base/base.isolate b/base/base.isolate
index e2d8bea..079d07d 100644
--- a/base/base.isolate
+++ b/base/base.isolate
@@ -27,14 +27,6 @@
         ],
       },
     }],
-    ['OS=="win"', {
-      # Required for base/stack_trace_win.cc to symbolize correctly.
-      'variables': {
-        'files': [
-          '<(PRODUCT_DIR)/dbghelp.dll',
-        ],
-      },
-    }],
     ['OS=="win" and asan==1 and component=="shared_library"', {
       'variables': {
         'files': [
diff --git a/base/base_nacl.gyp b/base/base_nacl.gyp
index 675cbd6..30763d4 100644
--- a/base/base_nacl.gyp
+++ b/base/base_nacl.gyp
@@ -40,7 +40,9 @@
             ],
           },
           'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
             'base.gyp:base_debugging_flags',
+            'base.gyp:base_build_date',
           ],
         },
         {
@@ -63,6 +65,8 @@
             ],
           },
           'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
+            'base.gyp:base_build_date',
             '../third_party/icu/icu_nacl.gyp:icudata_nacl',
             '../third_party/icu/icu_nacl.gyp:icui18n_nacl',
             '../third_party/icu/icu_nacl.gyp:icuuc_nacl',
@@ -117,7 +121,9 @@
             'rand_util_nacl.cc',
           ],
           'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
             'base.gyp:base_debugging_flags',
+            'base.gyp:base_build_date',
             'third_party/libevent/libevent_nacl_nonsfi.gyp:event_nacl_nonsfi',
           ],
         },
@@ -141,6 +147,7 @@
             ],
           },
           'dependencies': [
+            'base.gyp:base_build_date',
             'base_nacl_nonsfi',
             '../testing/gtest_nacl.gyp:gtest_nacl',
           ],
diff --git a/base/base_switches.cc b/base/base_switches.cc
index 02b2229..f5c6eb3 100644
--- a/base/base_switches.cc
+++ b/base/base_switches.cc
@@ -20,6 +20,10 @@
 // the memory-infra category is enabled.
 const char kEnableHeapProfiling[]           = "enable-heap-profiling";
 
+// Report native (walk the stack) allocation traces. By default pseudo stacks
+// derived from trace events are reported.
+const char kEnableHeapProfilingModeNative[] = "native";
+
 // Generates full memory crash dump.
 const char kFullMemoryCrashReport[]         = "full-memory-crash-report";
 
@@ -46,6 +50,11 @@
 // to the test framework that the current process is a child process.
 const char kTestChildProcess[]              = "test-child-process";
 
+// When running certain tests that spawn child processes, this switch indicates
+// to the test framework that the current process should not initialize ICU to
+// avoid creating any scoped handles too early in startup.
+const char kTestDoNotInitializeIcu[]        = "test-do-not-initialize-icu";
+
 // Gives the default maximal active V-logging level; 0 is the default.
 // Normally positive values are used for V-logging levels.
 const char kV[]                             = "v";
@@ -80,6 +89,16 @@
 // chrome://profiler.
 const char kProfilerTimingDisabledValue[]   = "0";
 
+// Specifies a location for profiling output. This will only work if chrome has
+// been built with the gyp variable profiling=1 or gn arg enable_profiling=true.
+//
+//   {pid} if present will be replaced by the pid of the process.
+//   {count} if present will be incremented each time a profile is generated
+//           for this process.
+// The default is chrome-profile-{pid} for the browser and test-profile-{pid}
+// for tests.
+const char kProfilingFile[] = "profiling-file";
+
 #if defined(OS_WIN)
 // Disables the USB keyboard detection for blocking the OSK on Win8+.
 const char kDisableUsbKeyboardDetect[]      = "disable-usb-keyboard-detect";
diff --git a/base/base_switches.h b/base/base_switches.h
index c97a629..0585186 100644
--- a/base/base_switches.h
+++ b/base/base_switches.h
@@ -15,13 +15,16 @@
 extern const char kDisableLowEndDeviceMode[];
 extern const char kEnableCrashReporter[];
 extern const char kEnableHeapProfiling[];
+extern const char kEnableHeapProfilingModeNative[];
 extern const char kEnableLowEndDeviceMode[];
 extern const char kForceFieldTrials[];
 extern const char kFullMemoryCrashReport[];
 extern const char kNoErrorDialogs[];
 extern const char kProfilerTiming[];
 extern const char kProfilerTimingDisabledValue[];
+extern const char kProfilingFile[];
 extern const char kTestChildProcess[];
+extern const char kTestDoNotInitializeIcu[];
 extern const char kTraceToFile[];
 extern const char kTraceToFileName[];
 extern const char kV[];
diff --git a/base/bind.h b/base/bind.h
index 770e457..9cf65b6 100644
--- a/base/bind.h
+++ b/base/bind.h
@@ -6,7 +6,6 @@
 #define BASE_BIND_H_
 
 #include "base/bind_internal.h"
-#include "base/callback_internal.h"
 
 // -----------------------------------------------------------------------------
 // Usage documentation
@@ -22,78 +21,21 @@
 // If you're reading the implementation, before proceeding further, you should
 // read the top comment of base/bind_internal.h for a definition of common
 // terms and concepts.
-//
-// RETURN TYPES
-//
-// Though Bind()'s result is meant to be stored in a Callback<> type, it
-// cannot actually return the exact type without requiring a large amount
-// of extra template specializations. The problem is that in order to
-// discern the correct specialization of Callback<>, Bind would need to
-// unwrap the function signature to determine the signature's arity, and
-// whether or not it is a method.
-//
-// Each unique combination of (arity, function_type, num_prebound) where
-// function_type is one of {function, method, const_method} would require
-// one specialization.  We eventually have to do a similar number of
-// specializations anyways in the implementation (see the Invoker<>,
-// classes).  However, it is avoidable in Bind if we return the result
-// via an indirection like we do below.
-//
-// TODO(ajwong): We might be able to avoid this now, but need to test.
-//
-// It is possible to move most of the static_assert into BindState<>, but it
-// feels a little nicer to have the asserts here so people do not need to crack
-// open bind_internal.h.  On the other hand, it makes Bind() harder to read.
 
 namespace base {
 
 template <typename Functor, typename... Args>
-base::Callback<
-    typename internal::BindState<
-        typename internal::FunctorTraits<Functor>::RunnableType,
-        typename internal::FunctorTraits<Functor>::RunType,
-        typename internal::CallbackParamTraits<Args>::StorageType...>
-            ::UnboundRunType>
-Bind(Functor functor, const Args&... args) {
-  // Type aliases for how to store and run the functor.
-  using RunnableType = typename internal::FunctorTraits<Functor>::RunnableType;
-  using RunType = typename internal::FunctorTraits<Functor>::RunType;
+inline base::Callback<MakeUnboundRunType<Functor, Args...>> Bind(
+    Functor&& functor,
+    Args&&... args) {
+  using BindState = internal::MakeBindStateType<Functor, Args...>;
+  using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
+  using Invoker = internal::Invoker<BindState, UnboundRunType>;
 
-  // Use RunnableType::RunType instead of RunType above because our
-  // checks below for bound references need to know what the actual
-  // functor is going to interpret the argument as.
-  using BoundRunType = typename RunnableType::RunType;
-
-  using BoundArgs =
-      internal::TakeTypeListItem<sizeof...(Args),
-                                 internal::ExtractArgs<BoundRunType>>;
-
-  // Do not allow binding a non-const reference parameter. Non-const reference
-  // parameters are disallowed by the Google style guide.  Also, binding a
-  // non-const reference parameter can make for subtle bugs because the
-  // invoked function will receive a reference to the stored copy of the
-  // argument and not the original.
-  static_assert(!internal::HasNonConstReferenceItem<BoundArgs>::value,
-                "do not bind functions with nonconst ref");
-
-  const bool is_method = internal::HasIsMethodTag<RunnableType>::value;
-
-  // For methods, we need to be careful for parameter 1.  We do not require
-  // a scoped_refptr because BindState<> itself takes care of AddRef() for
-  // methods. We also disallow binding of an array as the method's target
-  // object.
-  static_assert(!internal::BindsArrayToFirstArg<is_method, Args...>::value,
-                "first bound argument to method cannot be array");
-  static_assert(
-      !internal::HasRefCountedParamAsRawPtr<is_method, Args...>::value,
-      "a parameter is a refcounted type and needs scoped_refptr");
-
-  using BindState = internal::BindState<
-      RunnableType, RunType,
-      typename internal::CallbackParamTraits<Args>::StorageType...>;
-
-  return Callback<typename BindState::UnboundRunType>(
-      new BindState(internal::MakeRunnable(functor), args...));
+  using CallbackType = Callback<UnboundRunType>;
+  return CallbackType(new BindState(std::forward<Functor>(functor),
+                                    std::forward<Args>(args)...),
+                      &Invoker::Run);
 }
 
 }  // namespace base
diff --git a/base/bind_helpers.h b/base/bind_helpers.h
index b97558c..93d02e3 100644
--- a/base/bind_helpers.h
+++ b/base/bind_helpers.h
@@ -28,6 +28,9 @@
 // argument will CHECK() because the first invocation would have already
 // transferred ownership to the target function.
 //
+// RetainedRef() accepts a ref counted object and retains a reference to it.
+// When the callback is called, the object is passed as a raw pointer.
+//
 // ConstRef() allows binding a constant reference to an argument rather
 // than a copy.
 //
@@ -71,6 +74,19 @@
 // Without Owned(), someone would have to know to delete |pn| when the last
 // reference to the Callback is deleted.
 //
+// EXAMPLE OF RetainedRef():
+//
+//    void foo(RefCountedBytes* bytes) {}
+//
+//    scoped_refptr<RefCountedBytes> bytes = ...;
+//    Closure callback = Bind(&foo, base::RetainedRef(bytes));
+//    callback.Run();
+//
+// Without RetainedRef, the scoped_refptr would try to implicitly convert to
+// a raw pointer and fail compilation:
+//
+//    Closure callback = Bind(&foo, bytes); // ERROR!
+//
 //
 // EXAMPLE OF ConstRef():
 //
@@ -105,10 +121,11 @@
 //
 // EXAMPLE OF Passed():
 //
-//   void TakesOwnership(scoped_ptr<Foo> arg) { }
-//   scoped_ptr<Foo> CreateFoo() { return scoped_ptr<Foo>(new Foo()); }
+//   void TakesOwnership(std::unique_ptr<Foo> arg) { }
+//   std::unique_ptr<Foo> CreateFoo() { return std::unique_ptr<Foo>(new Foo());
+//   }
 //
-//   scoped_ptr<Foo> f(new Foo());
+//   std::unique_ptr<Foo> f(new Foo());
 //
 //   // |cb| is given ownership of Foo(). |f| is now NULL.
 //   // You can use std::move(f) in place of &f, but it's more verbose.
@@ -145,158 +162,20 @@
 
 #include <stddef.h>
 
-#include <map>
-#include <memory>
 #include <type_traits>
 #include <utility>
-#include <vector>
 
 #include "base/callback.h"
 #include "base/memory/weak_ptr.h"
-#include "base/template_util.h"
 #include "build/build_config.h"
 
 namespace base {
+
+template <typename T>
+struct IsWeakReceiver;
+
 namespace internal {
 
-// Use the Substitution Failure Is Not An Error (SFINAE) trick to inspect T
-// for the existence of AddRef() and Release() functions of the correct
-// signature.
-//
-// http://en.wikipedia.org/wiki/Substitution_failure_is_not_an_error
-// http://stackoverflow.com/questions/257288/is-it-possible-to-write-a-c-template-to-check-for-a-functions-existence
-// http://stackoverflow.com/questions/4358584/sfinae-approach-comparison
-// http://stackoverflow.com/questions/1966362/sfinae-to-check-for-inherited-member-functions
-//
-// The last link in particular show the method used below.
-//
-// For SFINAE to work with inherited methods, we need to pull some extra tricks
-// with multiple inheritance.  In the more standard formulation, the overloads
-// of Check would be:
-//
-//   template <typename C>
-//   Yes NotTheCheckWeWant(Helper<&C::TargetFunc>*);
-//
-//   template <typename C>
-//   No NotTheCheckWeWant(...);
-//
-//   static const bool value = sizeof(NotTheCheckWeWant<T>(0)) == sizeof(Yes);
-//
-// The problem here is that template resolution will not match
-// C::TargetFunc if TargetFunc does not exist directly in C.  That is, if
-// TargetFunc in inherited from an ancestor, &C::TargetFunc will not match,
-// |value| will be false.  This formulation only checks for whether or
-// not TargetFunc exist directly in the class being introspected.
-//
-// To get around this, we play a dirty trick with multiple inheritance.
-// First, We create a class BaseMixin that declares each function that we
-// want to probe for.  Then we create a class Base that inherits from both T
-// (the class we wish to probe) and BaseMixin.  Note that the function
-// signature in BaseMixin does not need to match the signature of the function
-// we are probing for; thus it's easiest to just use void().
-//
-// Now, if TargetFunc exists somewhere in T, then &Base::TargetFunc has an
-// ambiguous resolution between BaseMixin and T.  This lets us write the
-// following:
-//
-//   template <typename C>
-//   No GoodCheck(Helper<&C::TargetFunc>*);
-//
-//   template <typename C>
-//   Yes GoodCheck(...);
-//
-//   static const bool value = sizeof(GoodCheck<Base>(0)) == sizeof(Yes);
-//
-// Notice here that the variadic version of GoodCheck() returns Yes here
-// instead of No like the previous one. Also notice that we calculate |value|
-// by specializing GoodCheck() on Base instead of T.
-//
-// We've reversed the roles of the variadic, and Helper overloads.
-// GoodCheck(Helper<&C::TargetFunc>*), when C = Base, fails to be a valid
-// substitution if T::TargetFunc exists. Thus GoodCheck<Base>(0) will resolve
-// to the variadic version if T has TargetFunc.  If T::TargetFunc does not
-// exist, then &C::TargetFunc is not ambiguous, and the overload resolution
-// will prefer GoodCheck(Helper<&C::TargetFunc>*).
-//
-// This method of SFINAE will correctly probe for inherited names, but it cannot
-// typecheck those names.  It's still a good enough sanity check though.
-//
-// Works on gcc-4.2, gcc-4.4, and Visual Studio 2008.
-//
-// TODO(ajwong): Move to ref_counted.h or template_util.h when we've vetted
-// this works well.
-//
-// TODO(ajwong): Make this check for Release() as well.
-// See http://crbug.com/82038.
-template <typename T>
-class SupportsAddRefAndRelease {
-  using Yes = char[1];
-  using No = char[2];
-
-  struct BaseMixin {
-    void AddRef();
-  };
-
-// MSVC warns when you try to use Base if T has a private destructor, the
-// common pattern for refcounted types. It does this even though no attempt to
-// instantiate Base is made.  We disable the warning for this definition.
-#if defined(OS_WIN)
-#pragma warning(push)
-#pragma warning(disable:4624)
-#endif
-  struct Base : public T, public BaseMixin {
-  };
-#if defined(OS_WIN)
-#pragma warning(pop)
-#endif
-
-  template <void(BaseMixin::*)()> struct Helper {};
-
-  template <typename C>
-  static No& Check(Helper<&C::AddRef>*);
-
-  template <typename >
-  static Yes& Check(...);
-
- public:
-  enum { value = sizeof(Check<Base>(0)) == sizeof(Yes) };
-};
-
-// Helpers to assert that arguments of a recounted type are bound with a
-// scoped_refptr.
-template <bool IsClasstype, typename T>
-struct UnsafeBindtoRefCountedArgHelper : false_type {
-};
-
-template <typename T>
-struct UnsafeBindtoRefCountedArgHelper<true, T>
-    : integral_constant<bool, SupportsAddRefAndRelease<T>::value> {
-};
-
-template <typename T>
-struct UnsafeBindtoRefCountedArg : false_type {
-};
-
-template <typename T>
-struct UnsafeBindtoRefCountedArg<T*>
-    : UnsafeBindtoRefCountedArgHelper<is_class<T>::value, T> {
-};
-
-template <typename T>
-class HasIsMethodTag {
-  using Yes = char[1];
-  using No = char[2];
-
-  template <typename U>
-  static Yes& Check(typename U::IsMethod*);
-
-  template <typename U>
-  static No& Check(...);
-
- public:
-  enum { value = sizeof(Check<T>(0)) == sizeof(Yes) };
-};
-
 template <typename T>
 class UnretainedWrapper {
  public:
@@ -316,22 +195,26 @@
 };
 
 template <typename T>
+class RetainedRefWrapper {
+ public:
+  explicit RetainedRefWrapper(T* o) : ptr_(o) {}
+  explicit RetainedRefWrapper(scoped_refptr<T> o) : ptr_(std::move(o)) {}
+  T* get() const { return ptr_.get(); }
+ private:
+  scoped_refptr<T> ptr_;
+};
+
+template <typename T>
 struct IgnoreResultHelper {
-  explicit IgnoreResultHelper(T functor) : functor_(functor) {}
+  explicit IgnoreResultHelper(T functor) : functor_(std::move(functor)) {}
+  explicit operator bool() const { return !!functor_; }
 
   T functor_;
 };
 
-template <typename T>
-struct IgnoreResultHelper<Callback<T> > {
-  explicit IgnoreResultHelper(const Callback<T>& functor) : functor_(functor) {}
-
-  const Callback<T>& functor_;
-};
-
 // An alternate implementation is to avoid the destructive copy, and instead
 // specialize ParamTraits<> for OwnedWrapper<> to change the StorageType to
-// a class that is essentially a scoped_ptr<>.
+// a class that is essentially a std::unique_ptr<>.
 //
 // The current implementation has the benefit though of leaving ParamTraits<>
 // fully in callback_internal.h as well as avoiding type conversions during
@@ -342,7 +225,7 @@
   explicit OwnedWrapper(T* o) : ptr_(o) {}
   ~OwnedWrapper() { delete ptr_; }
   T* get() const { return ptr_; }
-  OwnedWrapper(const OwnedWrapper& other) {
+  OwnedWrapper(OwnedWrapper&& other) {
     ptr_ = other.ptr_;
     other.ptr_ = NULL;
   }
@@ -379,9 +262,9 @@
  public:
   explicit PassedWrapper(T&& scoper)
       : is_valid_(true), scoper_(std::move(scoper)) {}
-  PassedWrapper(const PassedWrapper& other)
+  PassedWrapper(PassedWrapper&& other)
       : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
-  T Pass() const {
+  T Take() const {
     CHECK(is_valid_);
     is_valid_ = false;
     return std::move(scoper_);
@@ -392,161 +275,36 @@
   mutable T scoper_;
 };
 
-// Specialize PassedWrapper for std::unique_ptr used by base::Passed().
-// Use std::move() to transfer the data from one storage to another.
-template <typename T, typename D>
-class PassedWrapper<std::unique_ptr<T, D>> {
- public:
-  explicit PassedWrapper(std::unique_ptr<T, D> scoper)
-      : is_valid_(true), scoper_(std::move(scoper)) {}
-  PassedWrapper(const PassedWrapper& other)
-      : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
-
-  std::unique_ptr<T, D> Pass() const {
-    CHECK(is_valid_);
-    is_valid_ = false;
-    return std::move(scoper_);
-  }
-
- private:
-  mutable bool is_valid_;
-  mutable std::unique_ptr<T, D> scoper_;
-};
-
-// Specialize PassedWrapper for std::vector<std::unique_ptr<T>>.
-template <typename T, typename D, typename A>
-class PassedWrapper<std::vector<std::unique_ptr<T, D>, A>> {
- public:
-  explicit PassedWrapper(std::vector<std::unique_ptr<T, D>, A> scoper)
-      : is_valid_(true), scoper_(std::move(scoper)) {}
-  PassedWrapper(const PassedWrapper& other)
-      : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
-
-  std::vector<std::unique_ptr<T, D>, A> Pass() const {
-    CHECK(is_valid_);
-    is_valid_ = false;
-    return std::move(scoper_);
-  }
-
- private:
-  mutable bool is_valid_;
-  mutable std::vector<std::unique_ptr<T, D>, A> scoper_;
-};
-
-// Specialize PassedWrapper for std::map<K, std::unique_ptr<T>>.
-template <typename K, typename T, typename D, typename C, typename A>
-class PassedWrapper<std::map<K, std::unique_ptr<T, D>, C, A>> {
- public:
-  explicit PassedWrapper(std::map<K, std::unique_ptr<T, D>, C, A> scoper)
-      : is_valid_(true), scoper_(std::move(scoper)) {}
-  PassedWrapper(const PassedWrapper& other)
-      : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
-
-  std::map<K, std::unique_ptr<T, D>, C, A> Pass() const {
-    CHECK(is_valid_);
-    is_valid_ = false;
-    return std::move(scoper_);
-  }
-
- private:
-  mutable bool is_valid_;
-  mutable std::map<K, std::unique_ptr<T, D>, C, A> scoper_;
-};
-
 // Unwrap the stored parameters for the wrappers above.
 template <typename T>
-struct UnwrapTraits {
-  using ForwardType = const T&;
-  static ForwardType Unwrap(const T& o) { return o; }
-};
+T&& Unwrap(T&& o) {
+  return std::forward<T>(o);
+}
 
 template <typename T>
-struct UnwrapTraits<UnretainedWrapper<T> > {
-  using ForwardType = T*;
-  static ForwardType Unwrap(UnretainedWrapper<T> unretained) {
-    return unretained.get();
-  }
-};
+T* Unwrap(const UnretainedWrapper<T>& unretained) {
+  return unretained.get();
+}
 
 template <typename T>
-struct UnwrapTraits<ConstRefWrapper<T> > {
-  using ForwardType = const T&;
-  static ForwardType Unwrap(ConstRefWrapper<T> const_ref) {
-    return const_ref.get();
-  }
-};
+const T& Unwrap(const ConstRefWrapper<T>& const_ref) {
+  return const_ref.get();
+}
 
 template <typename T>
-struct UnwrapTraits<scoped_refptr<T> > {
-  using ForwardType = T*;
-  static ForwardType Unwrap(const scoped_refptr<T>& o) { return o.get(); }
-};
+T* Unwrap(const RetainedRefWrapper<T>& o) {
+  return o.get();
+}
 
 template <typename T>
-struct UnwrapTraits<WeakPtr<T> > {
-  using ForwardType = const WeakPtr<T>&;
-  static ForwardType Unwrap(const WeakPtr<T>& o) { return o; }
-};
+T* Unwrap(const OwnedWrapper<T>& o) {
+  return o.get();
+}
 
 template <typename T>
-struct UnwrapTraits<OwnedWrapper<T> > {
-  using ForwardType = T*;
-  static ForwardType Unwrap(const OwnedWrapper<T>& o) {
-    return o.get();
-  }
-};
-
-template <typename T>
-struct UnwrapTraits<PassedWrapper<T> > {
-  using ForwardType = T;
-  static T Unwrap(PassedWrapper<T>& o) {
-    return o.Pass();
-  }
-};
-
-// Utility for handling different refcounting semantics in the Bind()
-// function.
-template <bool is_method, typename... T>
-struct MaybeScopedRefPtr;
-
-template <bool is_method>
-struct MaybeScopedRefPtr<is_method> {
-  MaybeScopedRefPtr() {}
-};
-
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<false, T, Rest...> {
-  MaybeScopedRefPtr(const T&, const Rest&...) {}
-};
-
-template <typename T, size_t n, typename... Rest>
-struct MaybeScopedRefPtr<false, T[n], Rest...> {
-  MaybeScopedRefPtr(const T*, const Rest&...) {}
-};
-
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<true, T, Rest...> {
-  MaybeScopedRefPtr(const T& /* o */, const Rest&...) {}
-};
-
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<true, T*, Rest...> {
-  MaybeScopedRefPtr(T* o, const Rest&...) : ref_(o) {}
-  scoped_refptr<T> ref_;
-};
-
-// No need to additionally AddRef() and Release() since we are storing a
-// scoped_refptr<> inside the storage object already.
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<true, scoped_refptr<T>, Rest...> {
-  MaybeScopedRefPtr(const scoped_refptr<T>&, const Rest&...) {}
-};
-
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<true, const T*, Rest...> {
-  MaybeScopedRefPtr(const T* o, const Rest&...) : ref_(o) {}
-  scoped_refptr<const T> ref_;
-};
+T Unwrap(const PassedWrapper<T>& o) {
+  return o.Take();
+}
 
 // IsWeakMethod is a helper that determine if we are binding a WeakPtr<> to a
 // method.  It is used internally by Bind() to select the correct
@@ -555,16 +313,11 @@
 //
 // The first argument should be the type of the object that will be received by
 // the method.
-template <bool IsMethod, typename... Args>
-struct IsWeakMethod : public false_type {};
+template <bool is_method, typename... Args>
+struct IsWeakMethod : std::false_type {};
 
 template <typename T, typename... Args>
-struct IsWeakMethod<true, WeakPtr<T>, Args...> : public true_type {};
-
-template <typename T, typename... Args>
-struct IsWeakMethod<true, ConstRefWrapper<WeakPtr<T>>, Args...>
-    : public true_type {};
-
+struct IsWeakMethod<true, T, Args...> : IsWeakReceiver<T> {};
 
 // Packs a list of types to hold them in a single type.
 template <typename... Types>
@@ -647,19 +400,25 @@
 template <typename R, typename ArgList>
 using MakeFunctionType = typename MakeFunctionTypeImpl<R, ArgList>::Type;
 
-// Used for ExtractArgs.
+// Used for ExtractArgs and ExtractReturnType.
 template <typename Signature>
 struct ExtractArgsImpl;
 
 template <typename R, typename... Args>
 struct ExtractArgsImpl<R(Args...)> {
-  using Type = TypeList<Args...>;
+  using ReturnType = R;
+  using ArgsList = TypeList<Args...>;
 };
 
 // A type-level function that extracts function arguments into a TypeList.
 // E.g. ExtractArgs<R(A, B, C)> is evaluated to TypeList<A, B, C>.
 template <typename Signature>
-using ExtractArgs = typename ExtractArgsImpl<Signature>::Type;
+using ExtractArgs = typename ExtractArgsImpl<Signature>::ArgsList;
+
+// A type-level function that extracts the return type of a function.
+// E.g. ExtractReturnType<R(A, B, C)> is evaluated to R.
+template <typename Signature>
+using ExtractReturnType = typename ExtractArgsImpl<Signature>::ReturnType;
 
 }  // namespace internal
 
@@ -669,6 +428,16 @@
 }
 
 template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(T* o) {
+  return internal::RetainedRefWrapper<T>(o);
+}
+
+template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(scoped_refptr<T> o) {
+  return internal::RetainedRefWrapper<T>(std::move(o));
+}
+
+template <typename T>
 static inline internal::ConstRefWrapper<T> ConstRef(const T& o) {
   return internal::ConstRefWrapper<T>(o);
 }
@@ -686,28 +455,19 @@
 // Both versions of Passed() prevent T from being an lvalue reference. The first
 // via use of enable_if, and the second takes a T* which will not bind to T&.
 template <typename T,
-          typename std::enable_if<internal::IsMoveOnlyType<T>::value &&
-                                  !std::is_lvalue_reference<T>::value>::type* =
+          typename std::enable_if<!std::is_lvalue_reference<T>::value>::type* =
               nullptr>
 static inline internal::PassedWrapper<T> Passed(T&& scoper) {
   return internal::PassedWrapper<T>(std::move(scoper));
 }
-template <typename T,
-          typename std::enable_if<internal::IsMoveOnlyType<T>::value>::type* =
-              nullptr>
+template <typename T>
 static inline internal::PassedWrapper<T> Passed(T* scoper) {
   return internal::PassedWrapper<T>(std::move(*scoper));
 }
 
 template <typename T>
 static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
-  return internal::IgnoreResultHelper<T>(data);
-}
-
-template <typename T>
-static inline internal::IgnoreResultHelper<Callback<T> >
-IgnoreResult(const Callback<T>& data) {
-  return internal::IgnoreResultHelper<Callback<T> >(data);
+  return internal::IgnoreResultHelper<T>(std::move(data));
 }
 
 BASE_EXPORT void DoNothing();
@@ -717,6 +477,26 @@
   delete obj;
 }
 
+// An injection point to control |this| pointer behavior on a method invocation.
+// If IsWeakReceiver<> is true_type for |T| and |T| is used for a receiver of a
+// method, base::Bind cancels the method invocation if the receiver is tested as
+// false.
+// E.g. Foo::bar() is not called:
+//   struct Foo : base::SupportsWeakPtr<Foo> {
+//     void bar() {}
+//   };
+//
+//   WeakPtr<Foo> oo = nullptr;
+//   base::Bind(&Foo::bar, oo).Run();
+template <typename T>
+struct IsWeakReceiver : std::false_type {};
+
+template <typename T>
+struct IsWeakReceiver<internal::ConstRefWrapper<T>> : IsWeakReceiver<T> {};
+
+template <typename T>
+struct IsWeakReceiver<WeakPtr<T>> : std::true_type {};
+
 }  // namespace base
 
 #endif  // BASE_BIND_HELPERS_H_
diff --git a/base/bind_internal.h b/base/bind_internal.h
index ac7cd00..3d6ca09 100644
--- a/base/bind_internal.h
+++ b/base/bind_internal.h
@@ -7,6 +7,7 @@
 
 #include <stddef.h>
 
+#include <tuple>
 #include <type_traits>
 
 #include "base/bind_helpers.h"
@@ -17,10 +18,6 @@
 #include "base/tuple.h"
 #include "build/build_config.h"
 
-#if defined(OS_WIN)
-#include "base/bind_internal_win.h"
-#endif
-
 namespace base {
 namespace internal {
 
@@ -28,63 +25,87 @@
 //
 //
 // CONCEPTS:
-//  Runnable -- A type (really a type class) that has a single Run() method
-//              and a RunType typedef that corresponds to the type of Run().
-//              A Runnable can declare that it should treated like a method
-//              call by including a typedef named IsMethod.  The value of
-//              this typedef is NOT inspected, only the existence.  When a
-//              Runnable declares itself a method, Bind() will enforce special
-//              refcounting + WeakPtr handling semantics for the first
-//              parameter which is expected to be an object.
-//  Functor -- A copyable type representing something that should be called.
-//             All function pointers, Callback<>, and Runnables are functors
-//             even if the invocation syntax differs.
+//  Functor -- A movable type representing something that should be called.
+//             All function pointers and Callback<> are functors even if the
+//             invocation syntax differs.
 //  RunType -- A function type (as opposed to function _pointer_ type) for
-//             a Run() function.  Usually just a convenience typedef.
+//             a Callback<>::Run().  Usually just a convenience typedef.
 //  (Bound)Args -- A set of types that stores the arguments.
 //
 // Types:
-//  RunnableAdapter<> -- Wraps the various "function" pointer types into an
-//                       object that adheres to the Runnable interface.
 //  ForceVoidReturn<> -- Helper class for translating function signatures to
 //                       equivalent forms with a "void" return type.
-//  FunctorTraits<> -- Type traits used determine the correct RunType and
-//                     RunnableType for a Functor.  This is where function
+//  FunctorTraits<> -- Type traits used to determine the correct RunType and
+//                     invocation manner for a Functor.  This is where function
 //                     signature adapters are applied.
-//  MakeRunnable<> -- Takes a Functor and returns an object in the Runnable
-//                    type class that represents the underlying Functor.
-//  InvokeHelper<> -- Take a Runnable + arguments and actully invokes it.
+//  InvokeHelper<> -- Take a Functor + arguments and actully invokes it.
 //                    Handle the differing syntaxes needed for WeakPtr<>
-//                    support, and for ignoring return values.  This is separate
-//                    from Invoker to avoid creating multiple version of
-//                    Invoker<>.
-//  Invoker<> -- Unwraps the curried parameters and executes the Runnable.
+//                    support.  This is separate from Invoker to avoid creating
+//                    multiple version of Invoker<>.
+//  Invoker<> -- Unwraps the curried parameters and executes the Functor.
 //  BindState<> -- Stores the curried parameters, and is the main entry point
-//                 into the Bind() system, doing most of the type resolution.
-//                 There are ARITY BindState types.
+//                 into the Bind() system.
 
-// HasNonConstReferenceParam selects true_type when any of the parameters in
-// |Sig| is a non-const reference.
-// Implementation note: This non-specialized case handles zero-arity case only.
-// Non-zero-arity cases should be handled by the specialization below.
-template <typename List>
-struct HasNonConstReferenceItem : false_type {};
+template <typename...>
+struct make_void {
+  using type = void;
+};
 
-// Implementation note: Select true_type if the first parameter is a non-const
-// reference.  Otherwise, skip the first parameter and check rest of parameters
-// recursively.
-template <typename T, typename... Args>
-struct HasNonConstReferenceItem<TypeList<T, Args...>>
-    : std::conditional<is_non_const_reference<T>::value,
-                       true_type,
-                       HasNonConstReferenceItem<TypeList<Args...>>>::type {};
+// A clone of C++17 std::void_t.
+// Unlike the original version, we need |make_void| as a helper struct to avoid
+// a C++14 defect.
+// ref: http://en.cppreference.com/w/cpp/types/void_t
+// ref: http://open-std.org/JTC1/SC22/WG21/docs/cwg_defects.html#1558
+template <typename... Ts>
+using void_t = typename make_void<Ts...>::type;
+
+template <typename Callable,
+          typename Signature = decltype(&Callable::operator())>
+struct ExtractCallableRunTypeImpl;
+
+template <typename Callable, typename R, typename... Args>
+struct ExtractCallableRunTypeImpl<Callable, R(Callable::*)(Args...) const> {
+  using Type = R(Args...);
+};
+
+// Evaluated to RunType of the given callable type.
+// Example:
+//   auto f = [](int, char*) { return 0.1; };
+//   ExtractCallableRunType<decltype(f)>
+//   is evaluated to
+//   double(int, char*);
+template <typename Callable>
+using ExtractCallableRunType =
+    typename ExtractCallableRunTypeImpl<Callable>::Type;
+
+// IsConvertibleToRunType<Functor> is std::true_type if |Functor| has operator()
+// and convertible to the corresponding function pointer. Otherwise, it's
+// std::false_type.
+// Example:
+//   IsConvertibleToRunType<void(*)()>::value is false.
+//
+//   struct Foo {};
+//   IsConvertibleToRunType<void(Foo::*)()>::value is false.
+//
+//   auto f = []() {};
+//   IsConvertibleToRunType<decltype(f)>::value is true.
+//
+//   int i = 0;
+//   auto g = [i]() {};
+//   IsConvertibleToRunType<decltype(g)>::value is false.
+template <typename Functor, typename SFINAE = void>
+struct IsConvertibleToRunType : std::false_type {};
+
+template <typename Callable>
+struct IsConvertibleToRunType<Callable, void_t<decltype(&Callable::operator())>>
+    : std::is_convertible<Callable, ExtractCallableRunType<Callable>*> {};
 
 // HasRefCountedTypeAsRawPtr selects true_type when any of the |Args| is a raw
 // pointer to a RefCounted type.
 // Implementation note: This non-specialized case handles zero-arity case only.
 // Non-zero-arity cases should be handled by the specialization below.
 template <typename... Args>
-struct HasRefCountedTypeAsRawPtr : false_type {};
+struct HasRefCountedTypeAsRawPtr : std::false_type {};
 
 // Implementation note: Select true_type if the first parameter is a raw pointer
 // to a RefCounted type. Otherwise, skip the first parameter and check rest of
@@ -92,117 +113,9 @@
 template <typename T, typename... Args>
 struct HasRefCountedTypeAsRawPtr<T, Args...>
     : std::conditional<NeedsScopedRefptrButGetsRawPtr<T>::value,
-                       true_type,
+                       std::true_type,
                        HasRefCountedTypeAsRawPtr<Args...>>::type {};
 
-// BindsArrayToFirstArg selects true_type when |is_method| is true and the first
-// item of |Args| is an array type.
-// Implementation note: This non-specialized case handles !is_method case and
-// zero-arity case only.  Other cases should be handled by the specialization
-// below.
-template <bool is_method, typename... Args>
-struct BindsArrayToFirstArg : false_type {};
-
-template <typename T, typename... Args>
-struct BindsArrayToFirstArg<true, T, Args...> : is_array<T> {};
-
-// HasRefCountedParamAsRawPtr is the same to HasRefCountedTypeAsRawPtr except
-// when |is_method| is true HasRefCountedParamAsRawPtr skips the first argument.
-// Implementation note: This non-specialized case handles !is_method case and
-// zero-arity case only.  Other cases should be handled by the specialization
-// below.
-template <bool is_method, typename... Args>
-struct HasRefCountedParamAsRawPtr : HasRefCountedTypeAsRawPtr<Args...> {};
-
-template <typename T, typename... Args>
-struct HasRefCountedParamAsRawPtr<true, T, Args...>
-    : HasRefCountedTypeAsRawPtr<Args...> {};
-
-// RunnableAdapter<>
-//
-// The RunnableAdapter<> templates provide a uniform interface for invoking
-// a function pointer, method pointer, or const method pointer. The adapter
-// exposes a Run() method with an appropriate signature. Using this wrapper
-// allows for writing code that supports all three pointer types without
-// undue repetition.  Without it, a lot of code would need to be repeated 3
-// times.
-//
-// For method pointers and const method pointers the first argument to Run()
-// is considered to be the received of the method.  This is similar to STL's
-// mem_fun().
-//
-// This class also exposes a RunType typedef that is the function type of the
-// Run() function.
-//
-// If and only if the wrapper contains a method or const method pointer, an
-// IsMethod typedef is exposed.  The existence of this typedef (NOT the value)
-// marks that the wrapper should be considered a method wrapper.
-
-template <typename Functor>
-class RunnableAdapter;
-
-// Function.
-template <typename R, typename... Args>
-class RunnableAdapter<R(*)(Args...)> {
- public:
-  // MSVC 2013 doesn't support Type Alias of function types.
-  // Revisit this after we update it to newer version.
-  typedef R RunType(Args...);
-
-  explicit RunnableAdapter(R(*function)(Args...))
-      : function_(function) {
-  }
-
-  R Run(typename CallbackParamTraits<Args>::ForwardType... args) {
-    return function_(CallbackForward(args)...);
-  }
-
- private:
-  R (*function_)(Args...);
-};
-
-// Method.
-template <typename R, typename T, typename... Args>
-class RunnableAdapter<R(T::*)(Args...)> {
- public:
-  // MSVC 2013 doesn't support Type Alias of function types.
-  // Revisit this after we update it to newer version.
-  typedef R RunType(T*, Args...);
-  using IsMethod = true_type;
-
-  explicit RunnableAdapter(R(T::*method)(Args...))
-      : method_(method) {
-  }
-
-  R Run(T* object, typename CallbackParamTraits<Args>::ForwardType... args) {
-    return (object->*method_)(CallbackForward(args)...);
-  }
-
- private:
-  R (T::*method_)(Args...);
-};
-
-// Const Method.
-template <typename R, typename T, typename... Args>
-class RunnableAdapter<R(T::*)(Args...) const> {
- public:
-  using RunType = R(const T*, Args...);
-  using IsMethod = true_type;
-
-  explicit RunnableAdapter(R(T::*method)(Args...) const)
-      : method_(method) {
-  }
-
-  R Run(const T* object,
-        typename CallbackParamTraits<Args>::ForwardType... args) {
-    return (object->*method_)(CallbackForward(args)...);
-  }
-
- private:
-  R (T::*method_)(Args...) const;
-};
-
-
 // ForceVoidReturn<>
 //
 // Set of templates that support forcing the function return type to void.
@@ -211,205 +124,269 @@
 
 template <typename R, typename... Args>
 struct ForceVoidReturn<R(Args...)> {
-  // MSVC 2013 doesn't support Type Alias of function types.
-  // Revisit this after we update it to newer version.
-  typedef void RunType(Args...);
+  using RunType = void(Args...);
 };
 
-
 // FunctorTraits<>
 //
 // See description at top of file.
-template <typename T>
-struct FunctorTraits {
-  using RunnableType = RunnableAdapter<T>;
-  using RunType = typename RunnableType::RunType;
+template <typename Functor, typename SFINAE = void>
+struct FunctorTraits;
+
+// For a callable type that is convertible to the corresponding function type.
+// This specialization is intended to allow binding captureless lambdas by
+// base::Bind(), based on the fact that captureless lambdas can be convertible
+// to the function type while capturing lambdas can't.
+template <typename Functor>
+struct FunctorTraits<
+    Functor,
+    typename std::enable_if<IsConvertibleToRunType<Functor>::value>::type> {
+  using RunType = ExtractCallableRunType<Functor>;
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = false;
+
+  template <typename... RunArgs>
+  static ExtractReturnType<RunType>
+  Invoke(const Functor& functor, RunArgs&&... args) {
+    return functor(std::forward<RunArgs>(args)...);
+  }
 };
 
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R (*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename... RunArgs>
+  static R Invoke(R (*function)(Args...), RunArgs&&... args) {
+    return function(std::forward<RunArgs>(args)...);
+  }
+};
+
+#if defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__stdcall*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename... RunArgs>
+  static R Invoke(R(__stdcall* function)(Args...), RunArgs&&... args) {
+    return function(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For functions.
+template <typename R, typename... Args>
+struct FunctorTraits<R(__fastcall*)(Args...)> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename... RunArgs>
+  static R Invoke(R(__fastcall* function)(Args...), RunArgs&&... args) {
+    return function(std::forward<RunArgs>(args)...);
+  }
+};
+
+#endif  // defined(OS_WIN) && !defined(ARCH_CPU_X86_64)
+
+// For methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...)> {
+  using RunType = R(Receiver*, Args...);
+  static constexpr bool is_method = true;
+  static constexpr bool is_nullable = true;
+
+  template <typename ReceiverPtr, typename... RunArgs>
+  static R Invoke(R (Receiver::*method)(Args...),
+                  ReceiverPtr&& receiver_ptr,
+                  RunArgs&&... args) {
+    // Clang skips CV qualifier check on a method pointer invocation when the
+    // receiver is a subclass. Store the receiver into a const reference to
+    // T to ensure the CV check works.
+    // https://llvm.org/bugs/show_bug.cgi?id=27037
+    Receiver& receiver = *receiver_ptr;
+    return (receiver.*method)(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For const methods.
+template <typename R, typename Receiver, typename... Args>
+struct FunctorTraits<R (Receiver::*)(Args...) const> {
+  using RunType = R(const Receiver*, Args...);
+  static constexpr bool is_method = true;
+  static constexpr bool is_nullable = true;
+
+  template <typename ReceiverPtr, typename... RunArgs>
+  static R Invoke(R (Receiver::*method)(Args...) const,
+                  ReceiverPtr&& receiver_ptr,
+                  RunArgs&&... args) {
+    // Clang skips CV qualifier check on a method pointer invocation when the
+    // receiver is a subclass. Store the receiver into a const reference to
+    // T to ensure the CV check works.
+    // https://llvm.org/bugs/show_bug.cgi?id=27037
+    const Receiver& receiver = *receiver_ptr;
+    return (receiver.*method)(std::forward<RunArgs>(args)...);
+  }
+};
+
+// For IgnoreResults.
 template <typename T>
-struct FunctorTraits<IgnoreResultHelper<T>> {
-  using RunnableType = typename FunctorTraits<T>::RunnableType;
+struct FunctorTraits<IgnoreResultHelper<T>> : FunctorTraits<T> {
   using RunType =
-      typename ForceVoidReturn<typename RunnableType::RunType>::RunType;
+      typename ForceVoidReturn<typename FunctorTraits<T>::RunType>::RunType;
+
+  template <typename IgnoreResultType, typename... RunArgs>
+  static void Invoke(IgnoreResultType&& ignore_result_helper,
+                     RunArgs&&... args) {
+    FunctorTraits<T>::Invoke(ignore_result_helper.functor_,
+                             std::forward<RunArgs>(args)...);
+  }
 };
 
-template <typename T>
-struct FunctorTraits<Callback<T>> {
-  using RunnableType = Callback<T> ;
-  using RunType = typename Callback<T>::RunType;
+// For Callbacks.
+template <typename R, typename... Args, CopyMode copy_mode>
+struct FunctorTraits<Callback<R(Args...), copy_mode>> {
+  using RunType = R(Args...);
+  static constexpr bool is_method = false;
+  static constexpr bool is_nullable = true;
+
+  template <typename CallbackType, typename... RunArgs>
+  static R Invoke(CallbackType&& callback, RunArgs&&... args) {
+    DCHECK(!callback.is_null());
+    return std::forward<CallbackType>(callback).Run(
+        std::forward<RunArgs>(args)...);
+  }
 };
 
-
-// MakeRunnable<>
-//
-// Converts a passed in functor to a RunnableType using type inference.
-
-template <typename T>
-typename FunctorTraits<T>::RunnableType MakeRunnable(const T& t) {
-  return RunnableAdapter<T>(t);
-}
-
-template <typename T>
-typename FunctorTraits<T>::RunnableType
-MakeRunnable(const IgnoreResultHelper<T>& t) {
-  return MakeRunnable(t.functor_);
-}
-
-template <typename T>
-const typename FunctorTraits<Callback<T>>::RunnableType&
-MakeRunnable(const Callback<T>& t) {
-  DCHECK(!t.is_null());
-  return t;
-}
-
-
 // InvokeHelper<>
 //
-// There are 3 logical InvokeHelper<> specializations: normal, void-return,
-// WeakCalls.
+// There are 2 logical InvokeHelper<> specializations: normal, WeakCalls.
 //
 // The normal type just calls the underlying runnable.
 //
-// We need a InvokeHelper to handle void return types in order to support
-// IgnoreResult().  Normally, if the Runnable's RunType had a void return,
-// the template system would just accept "return functor.Run()" ignoring
-// the fact that a void function is being used with return. This piece of
-// sugar breaks though when the Runnable's RunType is not void.  Thus, we
-// need a partial specialization to change the syntax to drop the "return"
-// from the invocation call.
-//
-// WeakCalls similarly need special syntax that is applied to the first
-// argument to check if they should no-op themselves.
-template <bool IsWeakCall, typename ReturnType, typename Runnable,
-          typename ArgsType>
+// WeakCalls need special syntax that is applied to the first argument to check
+// if they should no-op themselves.
+template <bool is_weak_call, typename ReturnType>
 struct InvokeHelper;
 
-template <typename ReturnType, typename Runnable, typename... Args>
-struct InvokeHelper<false, ReturnType, Runnable, TypeList<Args...>> {
-  static ReturnType MakeItSo(Runnable runnable, Args... args) {
-    return runnable.Run(CallbackForward(args)...);
+template <typename ReturnType>
+struct InvokeHelper<false, ReturnType> {
+  template <typename Functor, typename... RunArgs>
+  static inline ReturnType MakeItSo(Functor&& functor, RunArgs&&... args) {
+    using Traits = FunctorTraits<typename std::decay<Functor>::type>;
+    return Traits::Invoke(std::forward<Functor>(functor),
+                          std::forward<RunArgs>(args)...);
   }
 };
 
-template <typename Runnable, typename... Args>
-struct InvokeHelper<false, void, Runnable, TypeList<Args...>> {
-  static void MakeItSo(Runnable runnable, Args... args) {
-    runnable.Run(CallbackForward(args)...);
-  }
-};
-
-template <typename Runnable, typename BoundWeakPtr, typename... Args>
-struct InvokeHelper<true, void, Runnable, TypeList<BoundWeakPtr, Args...>> {
-  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr, Args... args) {
-    if (!weak_ptr.get()) {
-      return;
-    }
-    runnable.Run(weak_ptr.get(), CallbackForward(args)...);
-  }
-};
-
-#if !defined(_MSC_VER)
-
-template <typename ReturnType, typename Runnable, typename ArgsType>
-struct InvokeHelper<true, ReturnType, Runnable, ArgsType> {
+template <typename ReturnType>
+struct InvokeHelper<true, ReturnType> {
   // WeakCalls are only supported for functions with a void return type.
   // Otherwise, the function result would be undefined if the the WeakPtr<>
   // is invalidated.
-  static_assert(is_void<ReturnType>::value,
+  static_assert(std::is_void<ReturnType>::value,
                 "weak_ptrs can only bind to methods without return values");
-};
 
-#endif
+  template <typename Functor, typename BoundWeakPtr, typename... RunArgs>
+  static inline void MakeItSo(Functor&& functor,
+                              BoundWeakPtr&& weak_ptr,
+                              RunArgs&&... args) {
+    if (!weak_ptr)
+      return;
+    using Traits = FunctorTraits<typename std::decay<Functor>::type>;
+    Traits::Invoke(std::forward<Functor>(functor),
+                   std::forward<BoundWeakPtr>(weak_ptr),
+                   std::forward<RunArgs>(args)...);
+  }
+};
 
 // Invoker<>
 //
 // See description at the top of the file.
-template <typename BoundIndices,
-          typename StorageType, typename Unwrappers,
-          typename InvokeHelperType, typename UnboundForwardRunType>
+template <typename StorageType, typename UnboundRunType>
 struct Invoker;
 
-template <size_t... bound_indices,
-          typename StorageType,
-          typename... Unwrappers,
-          typename InvokeHelperType,
-          typename R,
-          typename... UnboundForwardArgs>
-struct Invoker<IndexSequence<bound_indices...>,
-               StorageType, TypeList<Unwrappers...>,
-               InvokeHelperType, R(UnboundForwardArgs...)> {
-  static R Run(BindStateBase* base,
-               UnboundForwardArgs... unbound_args) {
-    StorageType* storage = static_cast<StorageType*>(base);
+template <typename StorageType, typename R, typename... UnboundArgs>
+struct Invoker<StorageType, R(UnboundArgs...)> {
+  static R Run(BindStateBase* base, UnboundArgs&&... unbound_args) {
     // Local references to make debugger stepping easier. If in a debugger,
     // you really want to warp ahead and step through the
     // InvokeHelper<>::MakeItSo() call below.
-    return InvokeHelperType::MakeItSo(
-        storage->runnable_,
-        Unwrappers::Unwrap(get<bound_indices>(storage->bound_args_))...,
-        CallbackForward(unbound_args)...);
+    const StorageType* storage = static_cast<StorageType*>(base);
+    static constexpr size_t num_bound_args =
+        std::tuple_size<decltype(storage->bound_args_)>::value;
+    return RunImpl(storage->functor_,
+                   storage->bound_args_,
+                   MakeIndexSequence<num_bound_args>(),
+                   std::forward<UnboundArgs>(unbound_args)...);
+  }
+
+ private:
+  template <typename Functor, typename BoundArgsTuple, size_t... indices>
+  static inline R RunImpl(Functor&& functor,
+                          BoundArgsTuple&& bound,
+                          IndexSequence<indices...>,
+                          UnboundArgs&&... unbound_args) {
+    static constexpr bool is_method =
+        FunctorTraits<typename std::decay<Functor>::type>::is_method;
+
+    using DecayedArgsTuple = typename std::decay<BoundArgsTuple>::type;
+    static constexpr bool is_weak_call =
+        IsWeakMethod<is_method,
+                     typename std::tuple_element<
+                         indices,
+                         DecayedArgsTuple>::type...>::value;
+
+    return InvokeHelper<is_weak_call, R>::MakeItSo(
+        std::forward<Functor>(functor),
+        Unwrap(base::get<indices>(std::forward<BoundArgsTuple>(bound)))...,
+        std::forward<UnboundArgs>(unbound_args)...);
   }
 };
 
+// Used to implement MakeUnboundRunType.
+template <typename Functor, typename... BoundArgs>
+struct MakeUnboundRunTypeImpl {
+  using RunType =
+      typename FunctorTraits<typename std::decay<Functor>::type>::RunType;
+  using ReturnType = ExtractReturnType<RunType>;
+  using Args = ExtractArgs<RunType>;
+  using UnboundArgs = DropTypeListItem<sizeof...(BoundArgs), Args>;
+  using Type = MakeFunctionType<ReturnType, UnboundArgs>;
+};
+template <typename Functor>
+typename std::enable_if<FunctorTraits<Functor>::is_nullable, bool>::type
+IsNull(const Functor& functor) {
+  return !functor;
+}
+
+template <typename Functor>
+typename std::enable_if<!FunctorTraits<Functor>::is_nullable, bool>::type
+IsNull(const Functor&) {
+  return false;
+}
 
 // BindState<>
 //
-// This stores all the state passed into Bind() and is also where most
-// of the template resolution magic occurs.
-//
-// Runnable is the functor we are binding arguments to.
-// RunType is type of the Run() function that the Invoker<> should use.
-// Normally, this is the same as the RunType of the Runnable, but it can
-// be different if an adapter like IgnoreResult() has been used.
-//
-// BoundArgs contains the storage type for all the bound arguments.
-template <typename Runnable, typename RunType, typename... BoundArgs>
-struct BindState;
-
-template <typename Runnable,
-          typename R,
-          typename... Args,
-          typename... BoundArgs>
-struct BindState<Runnable, R(Args...), BoundArgs...> final
-    : public BindStateBase {
- private:
-  using StorageType = BindState<Runnable, R(Args...), BoundArgs...>;
-  using RunnableType = Runnable;
-
-  // true_type if Runnable is a method invocation and the first bound argument
-  // is a WeakPtr.
-  using IsWeakCall =
-      IsWeakMethod<HasIsMethodTag<Runnable>::value, BoundArgs...>;
-
-  using BoundIndices = MakeIndexSequence<sizeof...(BoundArgs)>;
-  using Unwrappers = TypeList<UnwrapTraits<BoundArgs>...>;
-  using UnboundForwardArgs = DropTypeListItem<
-      sizeof...(BoundArgs),
-      TypeList<typename CallbackParamTraits<Args>::ForwardType...>>;
-  using UnboundForwardRunType = MakeFunctionType<R, UnboundForwardArgs>;
-
-  using InvokeHelperArgs = ConcatTypeLists<
-      TypeList<typename UnwrapTraits<BoundArgs>::ForwardType...>,
-      UnboundForwardArgs>;
-  using InvokeHelperType =
-      InvokeHelper<IsWeakCall::value, R, Runnable, InvokeHelperArgs>;
-
-  using UnboundArgs = DropTypeListItem<sizeof...(BoundArgs), TypeList<Args...>>;
-
- public:
-  using InvokerType = Invoker<BoundIndices, StorageType, Unwrappers,
-                              InvokeHelperType, UnboundForwardRunType>;
-  using UnboundRunType = MakeFunctionType<R, UnboundArgs>;
-
-  BindState(const Runnable& runnable, const BoundArgs&... bound_args)
+// This stores all the state passed into Bind().
+template <typename Functor, typename... BoundArgs>
+struct BindState final : BindStateBase {
+  template <typename ForwardFunctor, typename... ForwardBoundArgs>
+  explicit BindState(ForwardFunctor&& functor, ForwardBoundArgs&&... bound_args)
       : BindStateBase(&Destroy),
-        runnable_(runnable),
-        ref_(bound_args...),
-        bound_args_(bound_args...) {}
+      functor_(std::forward<ForwardFunctor>(functor)),
+        bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
+    DCHECK(!IsNull(functor_));
+  }
 
-  RunnableType runnable_;
-  MaybeScopedRefPtr<HasIsMethodTag<Runnable>::value, BoundArgs...> ref_;
-  Tuple<BoundArgs...> bound_args_;
+  Functor functor_;
+  std::tuple<BoundArgs...> bound_args_;
 
  private:
   ~BindState() {}
@@ -419,7 +396,58 @@
   }
 };
 
+// Used to implement MakeBindStateType.
+template <bool is_method, typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl;
+
+template <typename Functor, typename... BoundArgs>
+struct MakeBindStateTypeImpl<false, Functor, BoundArgs...> {
+  static_assert(!HasRefCountedTypeAsRawPtr<BoundArgs...>::value,
+                "A parameter is a refcounted type and needs scoped_refptr.");
+  using Type = BindState<typename std::decay<Functor>::type,
+                         typename std::decay<BoundArgs>::type...>;
+};
+
+template <typename Functor>
+struct MakeBindStateTypeImpl<true, Functor> {
+  using Type = BindState<typename std::decay<Functor>::type>;
+};
+
+template <typename Functor, typename Receiver, typename... BoundArgs>
+struct MakeBindStateTypeImpl<true, Functor, Receiver, BoundArgs...> {
+  static_assert(
+      !std::is_array<typename std::remove_reference<Receiver>::type>::value,
+      "First bound argument to a method cannot be an array.");
+  static_assert(!HasRefCountedTypeAsRawPtr<BoundArgs...>::value,
+                "A parameter is a refcounted type and needs scoped_refptr.");
+
+ private:
+  using DecayedReceiver = typename std::decay<Receiver>::type;
+
+ public:
+  using Type = BindState<
+      typename std::decay<Functor>::type,
+      typename std::conditional<
+          std::is_pointer<DecayedReceiver>::value,
+          scoped_refptr<typename std::remove_pointer<DecayedReceiver>::type>,
+          DecayedReceiver>::type,
+      typename std::decay<BoundArgs>::type...>;
+};
+
+template <typename Functor, typename... BoundArgs>
+using MakeBindStateType = typename MakeBindStateTypeImpl<
+    FunctorTraits<typename std::decay<Functor>::type>::is_method,
+    Functor,
+    BoundArgs...>::Type;
+
 }  // namespace internal
+
+// Returns a RunType of bound functor.
+// E.g. MakeUnboundRunType<R(A, B, C), A, B> is evaluated to R(C).
+template <typename Functor, typename... BoundArgs>
+using MakeUnboundRunType =
+    typename internal::MakeUnboundRunTypeImpl<Functor, BoundArgs...>::Type;
+
 }  // namespace base
 
 #endif  // BASE_BIND_INTERNAL_H_
diff --git a/base/bind_internal_win.h b/base/bind_internal_win.h
deleted file mode 100644
index 2ee12ef..0000000
--- a/base/bind_internal_win.h
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Specializations of RunnableAdapter<> for Windows specific calling
-// conventions.  Please see base/bind_internal.h for more info.
-
-#ifndef BASE_BIND_INTERNAL_WIN_H_
-#define BASE_BIND_INTERNAL_WIN_H_
-
-#include "build/build_config.h"
-
-// In the x64 architecture in Windows, __fastcall, __stdcall, etc, are all
-// the same as __cdecl which would turn the following specializations into
-// multiple definitions.
-#if !defined(ARCH_CPU_X86_64)
-
-namespace base {
-namespace internal {
-
-template <typename Functor>
-class RunnableAdapter;
-
-// __stdcall Function.
-template <typename R, typename... Args>
-class RunnableAdapter<R(__stdcall *)(Args...)> {
- public:
-  // MSVC 2013 doesn't support Type Alias of function types.
-  // Revisit this after we update it to newer version.
-  typedef R RunType(Args...);
-
-  explicit RunnableAdapter(R(__stdcall *function)(Args...))
-      : function_(function) {
-  }
-
-  R Run(typename CallbackParamTraits<Args>::ForwardType... args) {
-    return function_(args...);
-  }
-
- private:
-  R (__stdcall *function_)(Args...);
-};
-
-// __fastcall Function.
-template <typename R, typename... Args>
-class RunnableAdapter<R(__fastcall *)(Args...)> {
- public:
-  // MSVC 2013 doesn't support Type Alias of function types.
-  // Revisit this after we update it to newer version.
-  typedef R RunType(Args...);
-
-  explicit RunnableAdapter(R(__fastcall *function)(Args...))
-      : function_(function) {
-  }
-
-  R Run(typename CallbackParamTraits<Args>::ForwardType... args) {
-    return function_(args...);
-  }
-
- private:
-  R (__fastcall *function_)(Args...);
-};
-
-}  // namespace internal
-}  // namespace base
-
-#endif  // !defined(ARCH_CPU_X86_64)
-
-#endif  // BASE_BIND_INTERNAL_WIN_H_
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index 25b4a10..ba5113b 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -6,11 +6,12 @@
 
 #include <memory>
 #include <utility>
+#include <vector>
 
 #include "base/callback.h"
 #include "base/macros.h"
+#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/weak_ptr.h"
 #include "build/build_config.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -62,7 +63,7 @@
 
 class Parent {
  public:
-  virtual ~Parent() = default;
+  virtual ~Parent() {}
   void AddRef() const {}
   void Release() const {}
   virtual void VirtualSet() { value = kParentValue; }
@@ -72,14 +73,14 @@
 
 class Child : public Parent {
  public:
-  ~Child() override = default;
+  ~Child() override {}
   void VirtualSet() override { value = kChildValue; }
   void NonVirtualSet() { value = kChildValue; }
 };
 
 class NoRefParent {
  public:
-  virtual ~NoRefParent() = default;
+  virtual ~NoRefParent() {}
   virtual void VirtualSet() { value = kParentValue; }
   void NonVirtualSet() { value = kParentValue; }
   int value;
@@ -87,48 +88,93 @@
 
 class NoRefChild : public NoRefParent {
  public:
-  ~NoRefChild() override = default;
+  ~NoRefChild() override {}
+ private:
   void VirtualSet() override { value = kChildValue; }
   void NonVirtualSet() { value = kChildValue; }
 };
 
-// Used for probing the number of copies that occur if a type must be coerced
-// during argument forwarding in the Run() methods.
-struct DerivedCopyCounter {
-  DerivedCopyCounter(int* copies, int* assigns)
-      : copies_(copies), assigns_(assigns) {
-  }
+// Used for probing the number of copies and moves that occur if a type must be
+// coerced during argument forwarding in the Run() methods.
+struct DerivedCopyMoveCounter {
+  DerivedCopyMoveCounter(int* copies,
+                         int* assigns,
+                         int* move_constructs,
+                         int* move_assigns)
+      : copies_(copies),
+        assigns_(assigns),
+        move_constructs_(move_constructs),
+        move_assigns_(move_assigns) {}
   int* copies_;
   int* assigns_;
+  int* move_constructs_;
+  int* move_assigns_;
 };
 
-// Used for probing the number of copies in an argument.
-class CopyCounter {
+// Used for probing the number of copies and moves in an argument.
+class CopyMoveCounter {
  public:
-  CopyCounter(int* copies, int* assigns)
-      : copies_(copies), assigns_(assigns) {
+  CopyMoveCounter(int* copies,
+                  int* assigns,
+                  int* move_constructs,
+                  int* move_assigns)
+      : copies_(copies),
+        assigns_(assigns),
+        move_constructs_(move_constructs),
+        move_assigns_(move_assigns) {}
+
+  CopyMoveCounter(const CopyMoveCounter& other)
+      : copies_(other.copies_),
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*copies_)++;
   }
 
-  CopyCounter(const CopyCounter& other)
+  CopyMoveCounter(CopyMoveCounter&& other)
       : copies_(other.copies_),
-        assigns_(other.assigns_) {
-    (*copies_)++;
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*move_constructs_)++;
   }
 
   // Probing for copies from coercion.
-  explicit CopyCounter(const DerivedCopyCounter& other)
+  explicit CopyMoveCounter(const DerivedCopyMoveCounter& other)
       : copies_(other.copies_),
-        assigns_(other.assigns_) {
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
     (*copies_)++;
   }
 
-  const CopyCounter& operator=(const CopyCounter& rhs) {
+  // Probing for moves from coercion.
+  explicit CopyMoveCounter(DerivedCopyMoveCounter&& other)
+      : copies_(other.copies_),
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*move_constructs_)++;
+  }
+
+  const CopyMoveCounter& operator=(const CopyMoveCounter& rhs) {
     copies_ = rhs.copies_;
     assigns_ = rhs.assigns_;
+    move_constructs_ = rhs.move_constructs_;
+    move_assigns_ = rhs.move_assigns_;
 
-    if (assigns_) {
-      (*assigns_)++;
-    }
+    (*assigns_)++;
+
+    return *this;
+  }
+
+  const CopyMoveCounter& operator=(CopyMoveCounter&& rhs) {
+    copies_ = rhs.copies_;
+    assigns_ = rhs.assigns_;
+    move_constructs_ = rhs.move_constructs_;
+    move_assigns_ = rhs.move_assigns_;
+
+    (*move_assigns_)++;
 
     return *this;
   }
@@ -140,6 +186,47 @@
  private:
   int* copies_;
   int* assigns_;
+  int* move_constructs_;
+  int* move_assigns_;
+};
+
+// Used for probing the number of copies in an argument. The instance is a
+// copyable and non-movable type.
+class CopyCounter {
+ public:
+  CopyCounter(int* copies, int* assigns)
+      : counter_(copies, assigns, nullptr, nullptr) {}
+  CopyCounter(const CopyCounter& other) : counter_(other.counter_) {}
+  CopyCounter& operator=(const CopyCounter& other) {
+    counter_ = other.counter_;
+    return *this;
+  }
+
+  explicit CopyCounter(const DerivedCopyMoveCounter& other) : counter_(other) {}
+
+  int copies() const { return counter_.copies(); }
+
+ private:
+  CopyMoveCounter counter_;
+};
+
+// Used for probing the number of moves in an argument. The instance is a
+// non-copyable and movable type.
+class MoveCounter {
+ public:
+  MoveCounter(int* move_constructs, int* move_assigns)
+      : counter_(nullptr, nullptr, move_constructs, move_assigns) {}
+  MoveCounter(MoveCounter&& other) : counter_(std::move(other.counter_)) {}
+  MoveCounter& operator=(MoveCounter&& other) {
+    counter_ = std::move(other.counter_);
+    return *this;
+  }
+
+  explicit MoveCounter(DerivedCopyMoveCounter&& other)
+      : counter_(std::move(other)) {}
+
+ private:
+  CopyMoveCounter counter_;
 };
 
 class DeleteCounter {
@@ -190,7 +277,7 @@
   return s;
 }
 
-int GetCopies(const CopyCounter& counter) {
+int GetCopies(const CopyMoveCounter& counter) {
   return counter.copies();
 }
 
@@ -338,8 +425,8 @@
 //     preserve virtual dispatch).
 TEST_F(BindTest, FunctionTypeSupport) {
   EXPECT_CALL(static_func_mock_, VoidMethod0());
-  EXPECT_CALL(has_ref_, AddRef()).Times(5);
-  EXPECT_CALL(has_ref_, Release()).Times(5);
+  EXPECT_CALL(has_ref_, AddRef()).Times(4);
+  EXPECT_CALL(has_ref_, Release()).Times(4);
   EXPECT_CALL(has_ref_, VoidMethod0()).Times(2);
   EXPECT_CALL(has_ref_, VoidConstMethod0()).Times(2);
 
@@ -569,28 +656,6 @@
   EXPECT_EQ(3, const_array_cb.Run());
 }
 
-// Verify SupportsAddRefAndRelease correctly introspects the class type for
-// AddRef() and Release().
-//  - Class with AddRef() and Release()
-//  - Class without AddRef() and Release()
-//  - Derived Class with AddRef() and Release()
-//  - Derived Class without AddRef() and Release()
-//  - Derived Class with AddRef() and Release() and a private destructor.
-TEST_F(BindTest, SupportsAddRefAndRelease) {
-  EXPECT_TRUE(internal::SupportsAddRefAndRelease<HasRef>::value);
-  EXPECT_FALSE(internal::SupportsAddRefAndRelease<NoRef>::value);
-
-  // StrictMock<T> is a derived class of T.  So, we use StrictMock<HasRef> and
-  // StrictMock<NoRef> to test that SupportsAddRefAndRelease works over
-  // inheritance.
-  EXPECT_TRUE(internal::SupportsAddRefAndRelease<StrictMock<HasRef> >::value);
-  EXPECT_FALSE(internal::SupportsAddRefAndRelease<StrictMock<NoRef> >::value);
-
-  // This matters because the implementation creates a dummy class that
-  // inherits from the template type.
-  EXPECT_TRUE(internal::SupportsAddRefAndRelease<HasRefPrivateDtor>::value);
-}
-
 // Unretained() wrapper support.
 //   - Method bound to Unretained() non-const object.
 //   - Const method bound to Unretained() non-const object.
@@ -669,24 +734,23 @@
 
   int copies = 0;
   int assigns = 0;
-  CopyCounter counter(&copies, &assigns);
+  int move_constructs = 0;
+  int move_assigns = 0;
+  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
   Callback<int()> all_const_ref_cb =
       Bind(&GetCopies, ConstRef(counter));
   EXPECT_EQ(0, all_const_ref_cb.Run());
   EXPECT_EQ(0, copies);
   EXPECT_EQ(0, assigns);
+  EXPECT_EQ(0, move_constructs);
+  EXPECT_EQ(0, move_assigns);
 }
 
 TEST_F(BindTest, ScopedRefptr) {
-  // BUG: The scoped_refptr should cause the only AddRef()/Release() pair. But
-  // due to a bug in base::Bind(), there's an extra call when invoking the
-  // callback.
-  // https://code.google.com/p/chromium/issues/detail?id=251937
-  EXPECT_CALL(has_ref_, AddRef()).Times(2);
-  EXPECT_CALL(has_ref_, Release()).Times(2);
+  EXPECT_CALL(has_ref_, AddRef()).Times(1);
+  EXPECT_CALL(has_ref_, Release()).Times(1);
 
-  const scoped_refptr<StrictMock<HasRef> > refptr(&has_ref_);
-
+  const scoped_refptr<HasRef> refptr(&has_ref_);
   Callback<int()> scoped_refptr_const_ref_cb =
       Bind(&FunctionWithScopedRefptrFirstParam, base::ConstRef(refptr), 1);
   EXPECT_EQ(1, scoped_refptr_const_ref_cb.Run());
@@ -717,37 +781,63 @@
   EXPECT_EQ(1, deletes);
 }
 
-// Passed() wrapper support.
+TEST_F(BindTest, UniquePtrReceiver) {
+  std::unique_ptr<StrictMock<NoRef>> no_ref(new StrictMock<NoRef>);
+  EXPECT_CALL(*no_ref, VoidMethod0()).Times(1);
+  Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
+}
+
+// Tests for Passed() wrapper support:
 //   - Passed() can be constructed from a pointer to scoper.
 //   - Passed() can be constructed from a scoper rvalue.
 //   - Using Passed() gives Callback Ownership.
 //   - Ownership is transferred from Callback to callee on the first Run().
 //   - Callback supports unbound arguments.
-TEST_F(BindTest, ScopedPtr) {
+template <typename T>
+class BindMoveOnlyTypeTest : public ::testing::Test {
+};
+
+struct CustomDeleter {
+  void operator()(DeleteCounter* c) { delete c; }
+};
+
+using MoveOnlyTypesToTest =
+    ::testing::Types<std::unique_ptr<DeleteCounter>,
+                     std::unique_ptr<DeleteCounter>,
+                     std::unique_ptr<DeleteCounter, CustomDeleter>>;
+TYPED_TEST_CASE(BindMoveOnlyTypeTest, MoveOnlyTypesToTest);
+
+TYPED_TEST(BindMoveOnlyTypeTest, PassedToBoundCallback) {
   int deletes = 0;
 
-  // Tests the Passed() function's support for pointers.
-  scoped_ptr<DeleteCounter> ptr(new DeleteCounter(&deletes));
-  Callback<scoped_ptr<DeleteCounter>()> unused_callback =
-      Bind(&PassThru<scoped_ptr<DeleteCounter> >, Passed(&ptr));
+  TypeParam ptr(new DeleteCounter(&deletes));
+  Callback<TypeParam()> callback = Bind(&PassThru<TypeParam>, Passed(&ptr));
   EXPECT_FALSE(ptr.get());
   EXPECT_EQ(0, deletes);
 
   // If we never invoke the Callback, it retains ownership and deletes.
-  unused_callback.Reset();
+  callback.Reset();
   EXPECT_EQ(1, deletes);
+}
 
-  // Tests the Passed() function's support for rvalues.
-  deletes = 0;
-  DeleteCounter* counter = new DeleteCounter(&deletes);
-  Callback<scoped_ptr<DeleteCounter>()> callback =
-      Bind(&PassThru<scoped_ptr<DeleteCounter> >,
-           Passed(scoped_ptr<DeleteCounter>(counter)));
-  EXPECT_FALSE(ptr.get());
+TYPED_TEST(BindMoveOnlyTypeTest, PassedWithRvalue) {
+  int deletes = 0;
+  Callback<TypeParam()> callback = Bind(
+      &PassThru<TypeParam>, Passed(TypeParam(new DeleteCounter(&deletes))));
   EXPECT_EQ(0, deletes);
 
-  // Check that ownership can be transferred back out.
-  scoped_ptr<DeleteCounter> result = callback.Run();
+  // If we never invoke the Callback, it retains ownership and deletes.
+  callback.Reset();
+  EXPECT_EQ(1, deletes);
+}
+
+// Check that ownership can be transferred back out.
+TYPED_TEST(BindMoveOnlyTypeTest, ReturnMoveOnlyType) {
+  int deletes = 0;
+  DeleteCounter* counter = new DeleteCounter(&deletes);
+  Callback<TypeParam()> callback =
+      Bind(&PassThru<TypeParam>, Passed(TypeParam(counter)));
+  TypeParam result = callback.Run();
   ASSERT_EQ(counter, result.get());
   EXPECT_EQ(0, deletes);
 
@@ -758,58 +848,49 @@
   // Ensure that we actually did get ownership.
   result.reset();
   EXPECT_EQ(1, deletes);
-
-  // Test unbound argument forwarding.
-  Callback<scoped_ptr<DeleteCounter>(scoped_ptr<DeleteCounter>)> cb_unbound =
-      Bind(&PassThru<scoped_ptr<DeleteCounter> >);
-  ptr.reset(new DeleteCounter(&deletes));
-  cb_unbound.Run(std::move(ptr));
 }
 
-TEST_F(BindTest, UniquePtr) {
+TYPED_TEST(BindMoveOnlyTypeTest, UnboundForwarding) {
   int deletes = 0;
-
-  // Tests the Passed() function's support for pointers.
-  std::unique_ptr<DeleteCounter> ptr(new DeleteCounter(&deletes));
-  Callback<std::unique_ptr<DeleteCounter>()> unused_callback =
-      Bind(&PassThru<std::unique_ptr<DeleteCounter>>, Passed(&ptr));
-  EXPECT_FALSE(ptr.get());
-  EXPECT_EQ(0, deletes);
-
-  // If we never invoke the Callback, it retains ownership and deletes.
-  unused_callback.Reset();
-  EXPECT_EQ(1, deletes);
-
-  // Tests the Passed() function's support for rvalues.
-  deletes = 0;
-  DeleteCounter* counter = new DeleteCounter(&deletes);
-  Callback<std::unique_ptr<DeleteCounter>()> callback =
-      Bind(&PassThru<std::unique_ptr<DeleteCounter>>,
-           Passed(std::unique_ptr<DeleteCounter>(counter)));
-  EXPECT_FALSE(ptr.get());
-  EXPECT_EQ(0, deletes);
-
-  // Check that ownership can be transferred back out.
-  std::unique_ptr<DeleteCounter> result = callback.Run();
-  ASSERT_EQ(counter, result.get());
-  EXPECT_EQ(0, deletes);
-
-  // Resetting does not delete since ownership was transferred.
-  callback.Reset();
-  EXPECT_EQ(0, deletes);
-
-  // Ensure that we actually did get ownership.
-  result.reset();
-  EXPECT_EQ(1, deletes);
-
+  TypeParam ptr(new DeleteCounter(&deletes));
   // Test unbound argument forwarding.
-  Callback<std::unique_ptr<DeleteCounter>(std::unique_ptr<DeleteCounter>)>
-      cb_unbound = Bind(&PassThru<std::unique_ptr<DeleteCounter>>);
-  ptr.reset(new DeleteCounter(&deletes));
+  Callback<TypeParam(TypeParam)> cb_unbound = Bind(&PassThru<TypeParam>);
   cb_unbound.Run(std::move(ptr));
+  EXPECT_EQ(1, deletes);
 }
 
-// Argument Copy-constructor usage for non-reference parameters.
+void VerifyVector(const std::vector<std::unique_ptr<int>>& v) {
+  ASSERT_EQ(1u, v.size());
+  EXPECT_EQ(12345, *v[0]);
+}
+
+std::vector<std::unique_ptr<int>> AcceptAndReturnMoveOnlyVector(
+    std::vector<std::unique_ptr<int>> v) {
+  VerifyVector(v);
+  return v;
+}
+
+// Test that a vector containing move-only types can be used with Callback.
+TEST_F(BindTest, BindMoveOnlyVector) {
+  using MoveOnlyVector = std::vector<std::unique_ptr<int>>;
+
+  MoveOnlyVector v;
+  v.push_back(WrapUnique(new int(12345)));
+
+  // Early binding should work:
+  base::Callback<MoveOnlyVector()> bound_cb =
+      base::Bind(&AcceptAndReturnMoveOnlyVector, Passed(&v));
+  MoveOnlyVector intermediate_result = bound_cb.Run();
+  VerifyVector(intermediate_result);
+
+  // As should passing it as an argument to Run():
+  base::Callback<MoveOnlyVector(MoveOnlyVector)> unbound_cb =
+      base::Bind(&AcceptAndReturnMoveOnlyVector);
+  MoveOnlyVector final_result = unbound_cb.Run(std::move(intermediate_result));
+  VerifyVector(final_result);
+}
+
+// Argument copy-constructor usage for non-reference copy-only parameters.
 //   - Bound arguments are only copied once.
 //   - Forwarded arguments are only copied once.
 //   - Forwarded arguments with coercions are only copied twice (once for the
@@ -819,28 +900,178 @@
   int assigns = 0;
 
   CopyCounter counter(&copies, &assigns);
-
-  Callback<void()> copy_cb =
-      Bind(&VoidPolymorphic<CopyCounter>::Run, counter);
-  EXPECT_GE(1, copies);
+  Bind(&VoidPolymorphic<CopyCounter>::Run, counter);
+  EXPECT_EQ(1, copies);
   EXPECT_EQ(0, assigns);
 
   copies = 0;
   assigns = 0;
-  Callback<void(CopyCounter)> forward_cb =
-      Bind(&VoidPolymorphic<CopyCounter>::Run);
-  forward_cb.Run(counter);
-  EXPECT_GE(1, copies);
+  Bind(&VoidPolymorphic<CopyCounter>::Run, CopyCounter(&copies, &assigns));
+  EXPECT_EQ(1, copies);
   EXPECT_EQ(0, assigns);
 
   copies = 0;
   assigns = 0;
-  DerivedCopyCounter derived(&copies, &assigns);
-  Callback<void(CopyCounter)> coerce_cb =
-      Bind(&VoidPolymorphic<CopyCounter>::Run);
-  coerce_cb.Run(CopyCounter(derived));
-  EXPECT_GE(2, copies);
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(counter);
+  EXPECT_EQ(2, copies);
   EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(CopyCounter(&copies, &assigns));
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  DerivedCopyMoveCounter derived(&copies, &assigns, nullptr, nullptr);
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(CopyCounter(derived));
+  EXPECT_EQ(2, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  Bind(&VoidPolymorphic<CopyCounter>::Run)
+      .Run(CopyCounter(
+          DerivedCopyMoveCounter(&copies, &assigns, nullptr, nullptr)));
+  EXPECT_EQ(2, copies);
+  EXPECT_EQ(0, assigns);
+}
+
+// Argument move-constructor usage for move-only parameters.
+//   - Bound arguments passed by move are not copied.
+TEST_F(BindTest, ArgumentMoves) {
+  int move_constructs = 0;
+  int move_assigns = 0;
+
+  Bind(&VoidPolymorphic<const MoveCounter&>::Run,
+       MoveCounter(&move_constructs, &move_assigns));
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  // TODO(tzik): Support binding move-only type into a non-reference parameter
+  // of a variant of Callback.
+
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<MoveCounter>::Run)
+      .Run(MoveCounter(&move_constructs, &move_assigns));
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<MoveCounter>::Run)
+      .Run(MoveCounter(DerivedCopyMoveCounter(
+          nullptr, nullptr, &move_constructs, &move_assigns)));
+  EXPECT_EQ(2, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+}
+
+// Argument constructor usage for non-reference movable-copyable
+// parameters.
+//   - Bound arguments passed by move are not copied.
+//   - Forwarded arguments are only copied once.
+//   - Forwarded arguments with coercions are only copied once and moved once.
+TEST_F(BindTest, ArgumentCopiesAndMoves) {
+  int copies = 0;
+  int assigns = 0;
+  int move_constructs = 0;
+  int move_assigns = 0;
+
+  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run, counter);
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(0, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run,
+       CopyMoveCounter(&copies, &assigns, &move_constructs, &move_assigns));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run).Run(counter);
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(&copies, &assigns, &move_constructs, &move_assigns));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  DerivedCopyMoveCounter derived_counter(&copies, &assigns, &move_constructs,
+                                         &move_assigns);
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(derived_counter));
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(DerivedCopyMoveCounter(
+          &copies, &assigns, &move_constructs, &move_assigns)));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(2, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+}
+
+TEST_F(BindTest, CapturelessLambda) {
+  EXPECT_FALSE(internal::IsConvertibleToRunType<void>::value);
+  EXPECT_FALSE(internal::IsConvertibleToRunType<int>::value);
+  EXPECT_FALSE(internal::IsConvertibleToRunType<void(*)()>::value);
+  EXPECT_FALSE(internal::IsConvertibleToRunType<void(NoRef::*)()>::value);
+
+  auto f = []() {};
+  EXPECT_TRUE(internal::IsConvertibleToRunType<decltype(f)>::value);
+
+  int i = 0;
+  auto g = [i]() {};
+  EXPECT_FALSE(internal::IsConvertibleToRunType<decltype(g)>::value);
+
+  auto h = [](int, double) { return 'k'; };
+  EXPECT_TRUE((std::is_same<
+      char(int, double),
+      internal::ExtractCallableRunType<decltype(h)>>::value));
+
+  EXPECT_EQ(42, Bind([] { return 42; }).Run());
+  EXPECT_EQ(42, Bind([](int i) { return i * 7; }, 6).Run());
+
+  int x = 1;
+  base::Callback<void(int)> cb =
+      Bind([](int* x, int i) { *x *= i; }, Unretained(&x));
+  cb.Run(6);
+  EXPECT_EQ(6, x);
+  cb.Run(7);
+  EXPECT_EQ(42, x);
 }
 
 // Callback construction and assignment tests.
diff --git a/base/bit_cast.h b/base/bit_cast.h
new file mode 100644
index 0000000..c9514bc
--- /dev/null
+++ b/base/bit_cast.h
@@ -0,0 +1,100 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BIT_CAST_H_
+#define BASE_BIT_CAST_H_
+
+#include <string.h>
+#include <type_traits>
+
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+// bit_cast<Dest,Source> is a template function that implements the equivalent
+// of "*reinterpret_cast<Dest*>(&source)".  We need this in very low-level
+// functions like the protobuf library and fast math support.
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32_t>(f);
+//   // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method actually produces undefined behavior according to
+// the ISO C++98 specification, section 3.10 ("basic.lval"), paragraph 15.
+// (This did not substantially change in C++11.)  Roughly, this section says: if
+// an object in memory has one type, and a program accesses it with a different
+// type, then the result is undefined behavior for most values of "different
+// type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f).  And it is particularly true for conversions
+// between integral lvalues and floating-point lvalues.
+//
+// The purpose of this paragraph is to allow optimizing compilers to assume that
+// expressions with different types refer to different memory.  Compilers are
+// known to take advantage of this.  So a non-conforming program quietly
+// produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast.  The problem is type punning:
+// holding an object in memory of one type and reading its bits back using a
+// different type.
+//
+// The C++ standard is more subtle and complex than this, but that is the basic
+// idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard, especially by the
+// example in section 3.9 .  Also, of course, bit_cast<> wraps up the nasty
+// logic in one place.
+//
+// Fortunately memcpy() is very fast.  In optimized mode, compilers replace
+// calls to memcpy() with inline object code when the size argument is a
+// compile-time constant.  On a 32-bit system, memcpy(d,s,4) compiles to one
+// load and one store, and memcpy(d,s,8) compiles to two loads and two stores.
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source),
+                "bit_cast requires source and destination to be the same size");
+
+#if (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) || \
+     (defined(__clang__) && defined(_LIBCPP_VERSION)))
+  // GCC 5.1 contains the first libstdc++ with is_trivially_copyable.
+  // Assume libc++ Just Works: is_trivially_copyable added on May 13th 2011.
+  // However, with libc++ when GCC is the compiler the trait is buggy, see
+  // crbug.com/607158, so fall back to the less strict variant for non-clang.
+  static_assert(std::is_trivially_copyable<Dest>::value,
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(std::is_trivially_copyable<Source>::value,
+                "non-trivially-copyable bit_cast is undefined");
+#elif HAS_FEATURE(is_trivially_copyable)
+  // The compiler supports an equivalent intrinsic.
+  static_assert(__is_trivially_copyable(Dest),
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(__is_trivially_copyable(Source),
+                "non-trivially-copyable bit_cast is undefined");
+#elif COMPILER_GCC
+  // Fallback to compiler intrinsic on GCC and clang (which pretends to be
+  // GCC). This isn't quite the same as is_trivially_copyable but it'll do for
+  // our purpose.
+  static_assert(__has_trivial_copy(Dest),
+                "non-trivially-copyable bit_cast is undefined");
+  static_assert(__has_trivial_copy(Source),
+                "non-trivially-copyable bit_cast is undefined");
+#else
+  // Do nothing, let the bots handle it.
+#endif
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+#endif  // BASE_BIT_CAST_H_
diff --git a/base/build_time.h b/base/build_time.h
index 4f0abc3..83c9875 100644
--- a/base/build_time.h
+++ b/base/build_time.h
@@ -10,17 +10,19 @@
 
 namespace base {
 
-// GetBuildTime returns the time at which the current binary was built.
+// GetBuildTime returns the time at which the current binary was built,
+// rounded down to 5:00:00am at the start of the day in UTC.
 //
-// This uses the __DATE__ and __TIME__ macros, which don't trigger a rebuild
-// when they change. However, official builds will always be rebuilt from
-// scratch.
+// This uses a generated file, which doesn't trigger a rebuild when the time
+// changes. It will, however, be updated whenever //build/util/LASTCHANGE
+// changes.
 //
-// Also, since __TIME__ doesn't include a timezone, this value should only be
-// considered accurate to a day.
+// This value should only be considered accurate to within a day.
+// It will always be in the past.
 //
-// NOTE: This function is disabled except for the official builds, by default
-// the date returned is "Sep 02 2008 08:00:00 PST".
+// Note: If the build is not official (i.e. is_official_build = false)
+// this time will be set to 5:00:00am on the most recent first Sunday
+// of a month.
 Time BASE_EXPORT GetBuildTime();
 
 }  // namespace base
diff --git a/base/build_time_unittest.cc b/base/build_time_unittest.cc
index aac64a7..64886b4 100644
--- a/base/build_time_unittest.cc
+++ b/base/build_time_unittest.cc
@@ -3,36 +3,43 @@
 // found in the LICENSE file.
 
 #include "base/build_time.h"
+#if !defined(DONT_EMBED_BUILD_METADATA)
+#include "base/generated_build_date.h"
+#endif
+#include "base/time/time.h"
 
 #include "testing/gtest/include/gtest/gtest.h"
 
 TEST(BuildTime, DateLooksValid) {
 #if !defined(DONT_EMBED_BUILD_METADATA)
-  char build_date[] = __DATE__;
+  char build_date[] = BUILD_DATE;
 #else
-  char build_date[] = "Sep 02 2008";
+  char build_date[] = "Sep 02 2008 05:00:00";
 #endif
 
-  EXPECT_EQ(11u, strlen(build_date));
+  EXPECT_EQ(20u, strlen(build_date));
   EXPECT_EQ(' ', build_date[3]);
   EXPECT_EQ(' ', build_date[6]);
+  EXPECT_EQ(' ', build_date[11]);
+  EXPECT_EQ('0', build_date[12]);
+  EXPECT_EQ('5', build_date[13]);
+  EXPECT_EQ(':', build_date[14]);
+  EXPECT_EQ('0', build_date[15]);
+  EXPECT_EQ('0', build_date[16]);
+  EXPECT_EQ(':', build_date[17]);
+  EXPECT_EQ('0', build_date[18]);
+  EXPECT_EQ('0', build_date[19]);
 }
 
-TEST(BuildTime, TimeLooksValid) {
-#if defined(DONT_EMBED_BUILD_METADATA)
-  char build_time[] = "08:00:00";
-#else
-  char build_time[] = __TIME__;
+TEST(BuildTime, InThePast) {
+  EXPECT_LT(base::GetBuildTime(), base::Time::Now());
+  EXPECT_LT(base::GetBuildTime(), base::Time::NowFromSystemTime());
+}
+
+#if !defined(DONT_EMBED_BUILD_METADATA)
+TEST(BuildTime, NotTooFar) {
+  // BuildTime must be less than 45 days old.
+  base::Time cutoff(base::Time::Now() - base::TimeDelta::FromDays(45));
+  EXPECT_GT(base::GetBuildTime(), cutoff);
+}
 #endif
-
-  EXPECT_EQ(8u, strlen(build_time));
-  EXPECT_EQ(':', build_time[2]);
-  EXPECT_EQ(':', build_time[5]);
-}
-
-TEST(BuildTime, DoesntCrash) {
-  // Since __DATE__ isn't updated unless one does a clobber build, we can't
-  // really test the value returned by it, except to check that it doesn't
-  // crash.
-  base::GetBuildTime();
-}
diff --git a/base/callback.h b/base/callback.h
index 3bf0008..e087c73 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -7,7 +7,6 @@
 
 #include "base/callback_forward.h"
 #include "base/callback_internal.h"
-#include "base/template_util.h"
 
 // NOTE: Header files that do not require the full definition of Callback or
 // Closure should #include "base/callback_forward.h" instead of this file.
@@ -188,8 +187,8 @@
 //
 // PASSING PARAMETERS AS A scoped_ptr
 //
-//   void TakesOwnership(scoped_ptr<Foo> arg) {}
-//   scoped_ptr<Foo> f(new Foo);
+//   void TakesOwnership(std::unique_ptr<Foo> arg) {}
+//   std::unique_ptr<Foo> f(new Foo);
 //   // f becomes null during the following call.
 //   base::Closure cb = base::Bind(&TakesOwnership, base::Passed(&f));
 //
@@ -341,63 +340,54 @@
 //      void Bar(char* ptr);
 //      Bind(&Foo, "test");
 //      Bind(&Bar, "test");  // This fails because ptr is not const.
-
-namespace base {
-
-// First, we forward declare the Callback class template. This informs the
-// compiler that the template only has 1 type parameter which is the function
-// signature that the Callback is representing.
-//
-// After this, create template specializations for 0-7 parameters. Note that
-// even though the template typelist grows, the specialization still
-// only has one type: the function signature.
 //
 // If you are thinking of forward declaring Callback in your own header file,
 // please include "base/callback_forward.h" instead.
 
-namespace internal {
-template <typename Runnable, typename RunType, typename... BoundArgsType>
-struct BindState;
-}  // namespace internal
+namespace base {
 
-template <typename R, typename... Args>
-class Callback<R(Args...)> : public internal::CallbackBase {
+template <typename R, typename... Args, internal::CopyMode copy_mode>
+class Callback<R(Args...), copy_mode>
+    : public internal::CallbackBase<copy_mode> {
+ private:
+  using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
+
  public:
   // MSVC 2013 doesn't support Type Alias of function types.
   // Revisit this after we update it to newer version.
   typedef R RunType(Args...);
 
-  Callback() : CallbackBase(nullptr) { }
+  Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
 
-  template <typename Runnable, typename BindRunType, typename... BoundArgsType>
-  explicit Callback(
-      internal::BindState<Runnable, BindRunType, BoundArgsType...>* bind_state)
-      : CallbackBase(bind_state) {
-    // Force the assignment to a local variable of PolymorphicInvoke
-    // so the compiler will typecheck that the passed in Run() method has
-    // the correct type.
-    PolymorphicInvoke invoke_func =
-        &internal::BindState<Runnable, BindRunType, BoundArgsType...>
-            ::InvokerType::Run;
-    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+  Callback(internal::BindStateBase* bind_state,
+           PolymorphicInvoke invoke_func)
+      : internal::CallbackBase<copy_mode>(bind_state) {
+    using InvokeFuncStorage =
+        typename internal::CallbackBase<copy_mode>::InvokeFuncStorage;
+    this->polymorphic_invoke_ =
+        reinterpret_cast<InvokeFuncStorage>(invoke_func);
   }
 
   bool Equals(const Callback& other) const {
-    return CallbackBase::Equals(other);
+    return this->EqualsInternal(other);
   }
 
-  R Run(typename internal::CallbackParamTraits<Args>::ForwardType... args)
-      const {
+  // Run() makes an extra copy compared to directly calling the bound function
+  // if an argument is passed-by-value and is copyable-but-not-movable:
+  // i.e. below copies CopyableNonMovableType twice.
+  //   void F(CopyableNonMovableType) {}
+  //   Bind(&F).Run(CopyableNonMovableType());
+  //
+  // We can not fully apply Perfect Forwarding idiom to the callchain from
+  // Callback::Run() to the target function. Perfect Forwarding requires
+  // knowing how the caller will pass the arguments. However, the signature of
+  // InvokerType::Run() needs to be fixed in the callback constructor, so Run()
+  // cannot template its arguments based on how it's called.
+  R Run(Args... args) const {
     PolymorphicInvoke f =
-        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
-
-    return f(bind_state_.get(), internal::CallbackForward(args)...);
+        reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke_);
+    return f(this->bind_state_.get(), std::forward<Args>(args)...);
   }
-
- private:
-  using PolymorphicInvoke =
-      R(*)(internal::BindStateBase*,
-           typename internal::CallbackParamTraits<Args>::ForwardType...);
 };
 
 }  // namespace base
diff --git a/base/callback_forward.h b/base/callback_forward.h
index a9a263a..8b9b89c 100644
--- a/base/callback_forward.h
+++ b/base/callback_forward.h
@@ -6,8 +6,19 @@
 #define BASE_CALLBACK_FORWARD_H_
 
 namespace base {
+namespace internal {
 
-template <typename Sig>
+// CopyMode is used to control the copyability of a Callback.
+// MoveOnly indicates the Callback is not copyable but movable, and Copyable
+// indicates it is copyable and movable.
+enum class CopyMode {
+  MoveOnly, Copyable,
+};
+
+}  // namespace internal
+
+template <typename Signature,
+          internal::CopyMode copy_mode = internal::CopyMode::Copyable>
 class Callback;
 
 // Syntactic sugar to make Callback<void()> easier to declare since it
diff --git a/base/callback_helpers.cc b/base/callback_helpers.cc
index ef02b2b..838e6c8 100644
--- a/base/callback_helpers.cc
+++ b/base/callback_helpers.cc
@@ -8,29 +8,33 @@
 
 namespace base {
 
-ScopedClosureRunner::ScopedClosureRunner() {
-}
+ScopedClosureRunner::ScopedClosureRunner() {}
 
 ScopedClosureRunner::ScopedClosureRunner(const Closure& closure)
-    : closure_(closure) {
-}
+    : closure_(closure) {}
 
 ScopedClosureRunner::~ScopedClosureRunner() {
   if (!closure_.is_null())
     closure_.Run();
 }
 
-void ScopedClosureRunner::Reset() {
+ScopedClosureRunner::ScopedClosureRunner(ScopedClosureRunner&& other)
+    : closure_(other.Release()) {}
+
+ScopedClosureRunner& ScopedClosureRunner::operator=(
+    ScopedClosureRunner&& other) {
+  ReplaceClosure(other.Release());
+  return *this;
+}
+
+void ScopedClosureRunner::RunAndReset() {
   Closure old_closure = Release();
   if (!old_closure.is_null())
     old_closure.Run();
 }
 
-void ScopedClosureRunner::Reset(const Closure& closure) {
-  Closure old_closure = Release();
+void ScopedClosureRunner::ReplaceClosure(const Closure& closure) {
   closure_ = closure;
-  if (!old_closure.is_null())
-    old_closure.Run();
 }
 
 Closure ScopedClosureRunner::Release() {
diff --git a/base/callback_helpers.h b/base/callback_helpers.h
index 8608039..782371f 100644
--- a/base/callback_helpers.h
+++ b/base/callback_helpers.h
@@ -27,16 +27,27 @@
   return ret;
 }
 
-// ScopedClosureRunner is akin to scoped_ptr for Closures. It ensures that the
-// Closure is executed and deleted no matter how the current scope exits.
+// ScopedClosureRunner is akin to std::unique_ptr<> for Closures. It ensures
+// that the Closure is executed no matter how the current scope exits.
 class BASE_EXPORT ScopedClosureRunner {
  public:
   ScopedClosureRunner();
   explicit ScopedClosureRunner(const Closure& closure);
   ~ScopedClosureRunner();
 
-  void Reset();
-  void Reset(const Closure& closure);
+  ScopedClosureRunner(ScopedClosureRunner&& other);
+
+  // Releases the current closure if it's set and replaces it with the closure
+  // from |other|.
+  ScopedClosureRunner& operator=(ScopedClosureRunner&& other);
+
+  // Calls the current closure and resets it, so it won't be called again.
+  void RunAndReset();
+
+  // Replaces the closure with the new one, releasing the old one without
+  // calling it.
+  void ReplaceClosure(const Closure& closure);
+
+  // Releases the Closure without calling it.
   Closure Release() WARN_UNUSED_RESULT;
 
  private:
diff --git a/base/callback_helpers_unittest.cc b/base/callback_helpers_unittest.cc
index 3b17a6b..8283996 100644
--- a/base/callback_helpers_unittest.cc
+++ b/base/callback_helpers_unittest.cc
@@ -14,7 +14,7 @@
   (*value)++;
 }
 
-TEST(BindHelpersTest, TestScopedClosureRunnerExitScope) {
+TEST(CallbackHelpersTest, TestScopedClosureRunnerExitScope) {
   int run_count = 0;
   {
     base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count));
@@ -23,7 +23,7 @@
   EXPECT_EQ(1, run_count);
 }
 
-TEST(BindHelpersTest, TestScopedClosureRunnerRelease) {
+TEST(CallbackHelpersTest, TestScopedClosureRunnerRelease) {
   int run_count = 0;
   base::Closure c;
   {
@@ -36,26 +36,59 @@
   EXPECT_EQ(1, run_count);
 }
 
-TEST(BindHelpersTest, TestScopedClosureRunnerReset) {
+TEST(CallbackHelpersTest, TestScopedClosureRunnerReplaceClosure) {
   int run_count_1 = 0;
   int run_count_2 = 0;
   {
     base::ScopedClosureRunner runner;
-    runner.Reset(base::Bind(&Increment, &run_count_1));
-    runner.Reset(base::Bind(&Increment, &run_count_2));
-    EXPECT_EQ(1, run_count_1);
+    runner.ReplaceClosure(base::Bind(&Increment, &run_count_1));
+    runner.ReplaceClosure(base::Bind(&Increment, &run_count_2));
+    EXPECT_EQ(0, run_count_1);
     EXPECT_EQ(0, run_count_2);
   }
+  EXPECT_EQ(0, run_count_1);
   EXPECT_EQ(1, run_count_2);
+}
 
+TEST(CallbackHelpersTest, TestScopedClosureRunnerRunAndReset) {
   int run_count_3 = 0;
   {
     base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_3));
     EXPECT_EQ(0, run_count_3);
-    runner.Reset();
+    runner.RunAndReset();
     EXPECT_EQ(1, run_count_3);
   }
   EXPECT_EQ(1, run_count_3);
 }
 
+TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveConstructor) {
+  int run_count = 0;
+  {
+    std::unique_ptr<base::ScopedClosureRunner> runner(
+        new base::ScopedClosureRunner(base::Bind(&Increment, &run_count)));
+    base::ScopedClosureRunner runner2(std::move(*runner));
+    runner.reset();
+    EXPECT_EQ(0, run_count);
+  }
+  EXPECT_EQ(1, run_count);
+}
+
+TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveAssignment) {
+  int run_count_1 = 0;
+  int run_count_2 = 0;
+  {
+    base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_1));
+    {
+      base::ScopedClosureRunner runner2(base::Bind(&Increment, &run_count_2));
+      runner = std::move(runner2);
+      EXPECT_EQ(0, run_count_1);
+      EXPECT_EQ(0, run_count_2);
+    }
+    EXPECT_EQ(0, run_count_1);
+    EXPECT_EQ(0, run_count_2);
+  }
+  EXPECT_EQ(0, run_count_1);
+  EXPECT_EQ(1, run_count_2);
+}
+
 }  // namespace
diff --git a/base/callback_internal.cc b/base/callback_internal.cc
index 2553fe7..4c8ccae 100644
--- a/base/callback_internal.cc
+++ b/base/callback_internal.cc
@@ -18,29 +18,66 @@
     destructor_(this);
 }
 
-CallbackBase::CallbackBase(const CallbackBase& c) = default;
-CallbackBase& CallbackBase::operator=(const CallbackBase& c) = default;
-
-void CallbackBase::Reset() {
-  polymorphic_invoke_ = NULL;
-  // NULL the bind_state_ last, since it may be holding the last ref to whatever
-  // object owns us, and we may be deleted after that.
-  bind_state_ = NULL;
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c)
+    : bind_state_(std::move(c.bind_state_)),
+      polymorphic_invoke_(c.polymorphic_invoke_) {
+  c.polymorphic_invoke_ = nullptr;
 }
 
-bool CallbackBase::Equals(const CallbackBase& other) const {
+CallbackBase<CopyMode::MoveOnly>&
+CallbackBase<CopyMode::MoveOnly>::operator=(CallbackBase&& c) {
+  bind_state_ = std::move(c.bind_state_);
+  polymorphic_invoke_ = c.polymorphic_invoke_;
+  c.polymorphic_invoke_ = nullptr;
+  return *this;
+}
+
+void CallbackBase<CopyMode::MoveOnly>::Reset() {
+  polymorphic_invoke_ = nullptr;
+  // NULL the bind_state_ last, since it may be holding the last ref to whatever
+  // object owns us, and we may be deleted after that.
+  bind_state_ = nullptr;
+}
+
+bool CallbackBase<CopyMode::MoveOnly>::EqualsInternal(
+    const CallbackBase& other) const {
   return bind_state_.get() == other.bind_state_.get() &&
          polymorphic_invoke_ == other.polymorphic_invoke_;
 }
 
-CallbackBase::CallbackBase(BindStateBase* bind_state)
-    : bind_state_(bind_state),
-      polymorphic_invoke_(NULL) {
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(
+    BindStateBase* bind_state)
+    : bind_state_(bind_state) {
   DCHECK(!bind_state_.get() || bind_state_->ref_count_ == 1);
 }
 
-CallbackBase::~CallbackBase() {
+CallbackBase<CopyMode::MoveOnly>::~CallbackBase() {}
+
+CallbackBase<CopyMode::Copyable>::CallbackBase(
+    const CallbackBase& c)
+    : CallbackBase<CopyMode::MoveOnly>(nullptr) {
+  bind_state_ = c.bind_state_;
+  polymorphic_invoke_ = c.polymorphic_invoke_;
 }
 
+CallbackBase<CopyMode::Copyable>::CallbackBase(CallbackBase&& c)
+    : CallbackBase<CopyMode::MoveOnly>(std::move(c)) {}
+
+CallbackBase<CopyMode::Copyable>&
+CallbackBase<CopyMode::Copyable>::operator=(const CallbackBase& c) {
+  bind_state_ = c.bind_state_;
+  polymorphic_invoke_ = c.polymorphic_invoke_;
+  return *this;
+}
+
+CallbackBase<CopyMode::Copyable>&
+CallbackBase<CopyMode::Copyable>::operator=(CallbackBase&& c) {
+  *static_cast<CallbackBase<CopyMode::MoveOnly>*>(this) = std::move(c);
+  return *this;
+}
+
+template class CallbackBase<CopyMode::MoveOnly>;
+template class CallbackBase<CopyMode::Copyable>;
+
 }  // namespace internal
 }  // namespace base
diff --git a/base/callback_internal.h b/base/callback_internal.h
index 630a5c4..0fe0b2d 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -8,21 +8,15 @@
 #ifndef BASE_CALLBACK_INTERNAL_H_
 #define BASE_CALLBACK_INTERNAL_H_
 
-#include <stddef.h>
-#include <map>
-#include <memory>
-#include <type_traits>
-#include <vector>
-
 #include "base/atomic_ref_count.h"
 #include "base/base_export.h"
+#include "base/callback_forward.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/template_util.h"
 
 namespace base {
 namespace internal {
+template <CopyMode copy_mode>
 class CallbackBase;
 
 // BindStateBase is used to provide an opaque handle that the Callback
@@ -44,6 +38,7 @@
 
  private:
   friend class scoped_refptr<BindStateBase>;
+  template <CopyMode copy_mode>
   friend class CallbackBase;
 
   void AddRef();
@@ -59,13 +54,17 @@
 
 // Holds the Callback methods that don't require specialization to reduce
 // template bloat.
-class BASE_EXPORT CallbackBase {
+// CallbackBase<MoveOnly> is a direct base class of MoveOnly callbacks, and
+// CallbackBase<Copyable> uses CallbackBase<MoveOnly> for its implementation.
+template <>
+class BASE_EXPORT CallbackBase<CopyMode::MoveOnly> {
  public:
-  CallbackBase(const CallbackBase& c);
-  CallbackBase& operator=(const CallbackBase& c);
+  CallbackBase(CallbackBase&& c);
+  CallbackBase& operator=(CallbackBase&& c);
 
   // Returns true if Callback is null (doesn't refer to anything).
   bool is_null() const { return bind_state_.get() == NULL; }
+  explicit operator bool() const { return !is_null(); }
 
   // Returns the Callback into an uninitialized state.
   void Reset();
@@ -78,7 +77,7 @@
   using InvokeFuncStorage = void(*)();
 
   // Returns true if this callback equals |other|. |other| may be null.
-  bool Equals(const CallbackBase& other) const;
+  bool EqualsInternal(const CallbackBase& other) const;
 
   // Allow initializing of |bind_state_| via the constructor to avoid default
   // initialization of the scoped_refptr.  We do not also initialize
@@ -92,143 +91,26 @@
   ~CallbackBase();
 
   scoped_refptr<BindStateBase> bind_state_;
-  InvokeFuncStorage polymorphic_invoke_;
+  InvokeFuncStorage polymorphic_invoke_ = nullptr;
 };
 
-// A helper template to determine if given type is non-const move-only-type,
-// i.e. if a value of the given type should be passed via std::move() in a
-// destructive way. Types are considered to be move-only if they have a
-// sentinel MoveOnlyTypeForCPP03 member: a class typically gets this from using
-// the DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND macro.
-// It would be easy to generalize this trait to all move-only types... but this
-// confuses template deduction in VS2013 with certain types such as
-// std::unique_ptr.
-// TODO(dcheng): Revisit this when Windows switches to VS2015 by default.
-template <typename T> struct IsMoveOnlyType {
-  template <typename U>
-  static YesType Test(const typename U::MoveOnlyTypeForCPP03*);
-
-  template <typename U>
-  static NoType Test(...);
-
-  static const bool value = sizeof((Test<T>(0))) == sizeof(YesType) &&
-                            !is_const<T>::value;
+// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
+template <>
+class BASE_EXPORT CallbackBase<CopyMode::Copyable>
+    : public CallbackBase<CopyMode::MoveOnly> {
+ public:
+  CallbackBase(const CallbackBase& c);
+  CallbackBase(CallbackBase&& c);
+  CallbackBase& operator=(const CallbackBase& c);
+  CallbackBase& operator=(CallbackBase&& c);
+ protected:
+  explicit CallbackBase(BindStateBase* bind_state)
+      : CallbackBase<CopyMode::MoveOnly>(bind_state) {}
+  ~CallbackBase() {}
 };
 
-// Specialization of IsMoveOnlyType so that std::unique_ptr is still considered
-// move-only, even without the sentinel member.
-template <typename T>
-struct IsMoveOnlyType<std::unique_ptr<T>> : std::true_type {};
-
-template <typename>
-struct CallbackParamTraitsForMoveOnlyType;
-
-template <typename>
-struct CallbackParamTraitsForNonMoveOnlyType;
-
-// TODO(tzik): Use a default parameter once MSVS supports variadic templates
-// with default values.
-// http://connect.microsoft.com/VisualStudio/feedbackdetail/view/957801/compilation-error-with-variadic-templates
-//
-// This is a typetraits object that's used to take an argument type, and
-// extract a suitable type for storing and forwarding arguments.
-//
-// In particular, it strips off references, and converts arrays to
-// pointers for storage; and it avoids accidentally trying to create a
-// "reference of a reference" if the argument is a reference type.
-//
-// This array type becomes an issue for storage because we are passing bound
-// parameters by const reference. In this case, we end up passing an actual
-// array type in the initializer list which C++ does not allow.  This will
-// break passing of C-string literals.
-template <typename T>
-struct CallbackParamTraits
-    : std::conditional<IsMoveOnlyType<T>::value,
-         CallbackParamTraitsForMoveOnlyType<T>,
-         CallbackParamTraitsForNonMoveOnlyType<T>>::type {
-};
-
-template <typename T>
-struct CallbackParamTraitsForNonMoveOnlyType {
-  using ForwardType = const T&;
-  using StorageType = T;
-};
-
-// The Storage should almost be impossible to trigger unless someone manually
-// specifies type of the bind parameters.  However, in case they do,
-// this will guard against us accidentally storing a reference parameter.
-//
-// The ForwardType should only be used for unbound arguments.
-template <typename T>
-struct CallbackParamTraitsForNonMoveOnlyType<T&> {
-  using ForwardType = T&;
-  using StorageType = T;
-};
-
-// Note that for array types, we implicitly add a const in the conversion. This
-// means that it is not possible to bind array arguments to functions that take
-// a non-const pointer. Trying to specialize the template based on a "const
-// T[n]" does not seem to match correctly, so we are stuck with this
-// restriction.
-template <typename T, size_t n>
-struct CallbackParamTraitsForNonMoveOnlyType<T[n]> {
-  using ForwardType = const T*;
-  using StorageType = const T*;
-};
-
-// See comment for CallbackParamTraits<T[n]>.
-template <typename T>
-struct CallbackParamTraitsForNonMoveOnlyType<T[]> {
-  using ForwardType = const T*;
-  using StorageType = const T*;
-};
-
-// Parameter traits for movable-but-not-copyable scopers.
-//
-// Callback<>/Bind() understands movable-but-not-copyable semantics where
-// the type cannot be copied but can still have its state destructively
-// transferred (aka. moved) to another instance of the same type by calling a
-// helper function.  When used with Bind(), this signifies transferal of the
-// object's state to the target function.
-//
-// For these types, the ForwardType must not be a const reference, or a
-// reference.  A const reference is inappropriate, and would break const
-// correctness, because we are implementing a destructive move.  A non-const
-// reference cannot be used with temporaries which means the result of a
-// function or a cast would not be usable with Callback<> or Bind().
-template <typename T>
-struct CallbackParamTraitsForMoveOnlyType {
-  using ForwardType = T;
-  using StorageType = T;
-};
-
-// CallbackForward() is a very limited simulation of C++11's std::forward()
-// used by the Callback/Bind system for a set of movable-but-not-copyable
-// types.  It is needed because forwarding a movable-but-not-copyable
-// argument to another function requires us to invoke the proper move
-// operator to create a rvalue version of the type.  The supported types are
-// whitelisted below as overloads of the CallbackForward() function. The
-// default template compiles out to be a no-op.
-//
-// In C++11, std::forward would replace all uses of this function.  However, it
-// is impossible to implement a general std::forward without C++11 due to a lack
-// of rvalue references.
-//
-// In addition to Callback/Bind, this is used by PostTaskAndReplyWithResult to
-// simulate std::forward() and forward the result of one Callback as a
-// parameter to another callback. This is to support Callbacks that return
-// the movable-but-not-copyable types whitelisted above.
-template <typename T>
-typename std::enable_if<!IsMoveOnlyType<T>::value, T>::type& CallbackForward(
-    T& t) {
-  return t;
-}
-
-template <typename T>
-typename std::enable_if<IsMoveOnlyType<T>::value, T>::type CallbackForward(
-    T& t) {
-  return std::move(t);
-}
+extern template class CallbackBase<CopyMode::MoveOnly>;
+extern template class CallbackBase<CopyMode::Copyable>;
 
 }  // namespace internal
 }  // namespace base
diff --git a/base/callback_list.h b/base/callback_list.h
index 7d6a478..7ab79dd 100644
--- a/base/callback_list.h
+++ b/base/callback_list.h
@@ -6,13 +6,12 @@
 #define BASE_CALLBACK_LIST_H_
 
 #include <list>
+#include <memory>
 
 #include "base/callback.h"
-#include "base/callback_internal.h"
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 
 // OVERVIEW:
 //
@@ -29,7 +28,7 @@
 //
 //   typedef base::Callback<void(const Foo&)> OnFooCallback;
 //
-//   scoped_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+//   std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
 //   RegisterCallback(const OnFooCallback& cb) {
 //     return callback_list_.Add(cb);
 //   }
@@ -62,7 +61,7 @@
 //     // Do something.
 //   }
 //
-//   scoped_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+//   std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
 //       foo_subscription_;
 //
 //   DISALLOW_COPY_AND_ASSIGN(MyWidgetListener);
@@ -103,9 +102,9 @@
   // Add a callback to the list. The callback will remain registered until the
   // returned Subscription is destroyed, which must occur before the
   // CallbackList is destroyed.
-  scoped_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT {
+  std::unique_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT {
     DCHECK(!cb.is_null());
-    return scoped_ptr<Subscription>(
+    return std::unique_ptr<Subscription>(
         new Subscription(this, callbacks_.insert(callbacks_.end(), cb)));
   }
 
@@ -211,8 +210,8 @@
 
   CallbackList() {}
 
-  void Notify(
-      typename internal::CallbackParamTraits<Args>::ForwardType... args) {
+  template <typename... RunArgs>
+  void Notify(RunArgs&&... args) {
     typename internal::CallbackListBase<CallbackType>::Iterator it =
         this->GetIterator();
     CallbackType* cb;
diff --git a/base/callback_list_unittest.cc b/base/callback_list_unittest.cc
index 010efc5..62081e9 100644
--- a/base/callback_list_unittest.cc
+++ b/base/callback_list_unittest.cc
@@ -4,12 +4,12 @@
 
 #include "base/callback_list.h"
 
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -38,7 +38,7 @@
     removal_subscription_.reset();
   }
   void SetSubscriptionToRemove(
-      scoped_ptr<CallbackList<void(void)>::Subscription> sub) {
+      std::unique_ptr<CallbackList<void(void)>::Subscription> sub) {
     removal_subscription_ = std::move(sub);
   }
 
@@ -46,7 +46,7 @@
 
  private:
   int total_;
-  scoped_ptr<CallbackList<void(void)>::Subscription> removal_subscription_;
+  std::unique_ptr<CallbackList<void(void)>::Subscription> removal_subscription_;
   DISALLOW_COPY_AND_ASSIGN(Remover);
 };
 
@@ -74,7 +74,7 @@
   bool added_;
   int total_;
   CallbackList<void(void)>* cb_reg_;
-  scoped_ptr<CallbackList<void(void)>::Subscription> subscription_;
+  std::unique_ptr<CallbackList<void(void)>::Subscription> subscription_;
   DISALLOW_COPY_AND_ASSIGN(Adder);
 };
 
@@ -118,42 +118,43 @@
   Summer s;
 
   CallbackList<void(int)> c1;
-  scoped_ptr<CallbackList<void(int)>::Subscription> subscription1 =
+  std::unique_ptr<CallbackList<void(int)>::Subscription> subscription1 =
       c1.Add(Bind(&Summer::AddOneParam, Unretained(&s)));
 
   c1.Notify(1);
   EXPECT_EQ(1, s.value());
 
   CallbackList<void(int, int)> c2;
-  scoped_ptr<CallbackList<void(int, int)>::Subscription> subscription2 =
+  std::unique_ptr<CallbackList<void(int, int)>::Subscription> subscription2 =
       c2.Add(Bind(&Summer::AddTwoParam, Unretained(&s)));
 
   c2.Notify(1, 2);
   EXPECT_EQ(3, s.value());
 
   CallbackList<void(int, int, int)> c3;
-  scoped_ptr<CallbackList<void(int, int, int)>::Subscription>
+  std::unique_ptr<CallbackList<void(int, int, int)>::Subscription>
       subscription3 = c3.Add(Bind(&Summer::AddThreeParam, Unretained(&s)));
 
   c3.Notify(1, 2, 3);
   EXPECT_EQ(6, s.value());
 
   CallbackList<void(int, int, int, int)> c4;
-  scoped_ptr<CallbackList<void(int, int, int, int)>::Subscription>
+  std::unique_ptr<CallbackList<void(int, int, int, int)>::Subscription>
       subscription4 = c4.Add(Bind(&Summer::AddFourParam, Unretained(&s)));
 
   c4.Notify(1, 2, 3, 4);
   EXPECT_EQ(10, s.value());
 
   CallbackList<void(int, int, int, int, int)> c5;
-  scoped_ptr<CallbackList<void(int, int, int, int, int)>::Subscription>
+  std::unique_ptr<CallbackList<void(int, int, int, int, int)>::Subscription>
       subscription5 = c5.Add(Bind(&Summer::AddFiveParam, Unretained(&s)));
 
   c5.Notify(1, 2, 3, 4, 5);
   EXPECT_EQ(15, s.value());
 
   CallbackList<void(int, int, int, int, int, int)> c6;
-  scoped_ptr<CallbackList<void(int, int, int, int, int, int)>::Subscription>
+  std::unique_ptr<
+      CallbackList<void(int, int, int, int, int, int)>::Subscription>
       subscription6 = c6.Add(Bind(&Summer::AddSixParam, Unretained(&s)));
 
   c6.Notify(1, 2, 3, 4, 5, 6);
@@ -166,9 +167,9 @@
   CallbackList<void(void)> cb_reg;
   Listener a, b, c;
 
-  scoped_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&a)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
 
   EXPECT_TRUE(a_subscription.get());
@@ -181,7 +182,7 @@
 
   b_subscription.reset();
 
-  scoped_ptr<CallbackList<void(void)>::Subscription> c_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> c_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&c)));
 
   cb_reg.Notify();
@@ -201,9 +202,9 @@
   CallbackList<void(int)> cb_reg;
   Listener a(1), b(-1), c(1);
 
-  scoped_ptr<CallbackList<void(int)>::Subscription> a_subscription =
+  std::unique_ptr<CallbackList<void(int)>::Subscription> a_subscription =
       cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&a)));
-  scoped_ptr<CallbackList<void(int)>::Subscription> b_subscription =
+  std::unique_ptr<CallbackList<void(int)>::Subscription> b_subscription =
       cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&b)));
 
   EXPECT_TRUE(a_subscription.get());
@@ -216,7 +217,7 @@
 
   b_subscription.reset();
 
-  scoped_ptr<CallbackList<void(int)>::Subscription> c_subscription =
+  std::unique_ptr<CallbackList<void(int)>::Subscription> c_subscription =
       cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&c)));
 
   cb_reg.Notify(10);
@@ -237,15 +238,15 @@
   Listener a, b;
   Remover remover_1, remover_2;
 
-  scoped_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
-      cb_reg.Add(Bind(&Remover::IncrementTotalAndRemove,
-          Unretained(&remover_1)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
-      cb_reg.Add(Bind(&Remover::IncrementTotalAndRemove,
-          Unretained(&remover_2)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_1)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_2)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&a)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
 
   // |remover_1| will remove itself.
@@ -278,9 +279,9 @@
   CallbackList<void(void)> cb_reg;
   Adder a(&cb_reg);
   Listener b;
-  scoped_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
       cb_reg.Add(Bind(&Adder::AddCallback, Unretained(&a)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
 
   cb_reg.Notify();
@@ -308,7 +309,7 @@
   cb_reg.set_removal_callback(
       Bind(&Counter::Increment, Unretained(&remove_count)));
 
-  scoped_ptr<CallbackList<void(void)>::Subscription> subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> subscription =
       cb_reg.Add(Bind(&DoNothing));
 
   // Removing a subscription outside of iteration signals the callback.
@@ -318,12 +319,12 @@
 
   // Configure two subscriptions to remove themselves.
   Remover remover_1, remover_2;
-  scoped_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
-      cb_reg.Add(Bind(&Remover::IncrementTotalAndRemove,
-          Unretained(&remover_1)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
-      cb_reg.Add(Bind(&Remover::IncrementTotalAndRemove,
-          Unretained(&remover_2)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_1)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_2)));
   remover_1.SetSubscriptionToRemove(std::move(remover_1_sub));
   remover_2.SetSubscriptionToRemove(std::move(remover_2_sub));
 
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
index 1f492d4..ce453a1 100644
--- a/base/callback_unittest.cc
+++ b/base/callback_unittest.cc
@@ -2,73 +2,50 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/bind.h"
 #include "base/callback.h"
+
+#include <memory>
+
+#include "base/bind.h"
 #include "base/callback_helpers.h"
 #include "base/callback_internal.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 
-namespace {
-
-struct FakeInvoker {
-  // MSVC 2013 doesn't support Type Alias of function types.
-  // Revisit this after we update it to newer version.
-  typedef void RunType(internal::BindStateBase*);
-  static void Run(internal::BindStateBase*) {
-  }
-};
-
-}  // namespace
-
-namespace internal {
+void NopInvokeFunc(internal::BindStateBase*) {}
 
 // White-box testpoints to inject into a Callback<> object for checking
 // comparators and emptiness APIs.  Use a BindState that is specialized
 // based on a type we declared in the anonymous namespace above to remove any
 // chance of colliding with another instantiation and breaking the
 // one-definition-rule.
-template <>
-struct BindState<void(), void(), FakeInvoker>
-    : public BindStateBase {
- public:
-  BindState() : BindStateBase(&Destroy) {}
-  using InvokerType = FakeInvoker;
+struct FakeBindState1 : internal::BindStateBase {
+  FakeBindState1() : BindStateBase(&Destroy) {}
  private:
-  ~BindState() {}
-  static void Destroy(BindStateBase* self) {
-    delete static_cast<BindState*>(self);
+  ~FakeBindState1() {}
+  static void Destroy(internal::BindStateBase* self) {
+    delete static_cast<FakeBindState1*>(self);
   }
 };
 
-template <>
-struct BindState<void(), void(), FakeInvoker, FakeInvoker>
-    : public BindStateBase {
- public:
-  BindState() : BindStateBase(&Destroy) {}
-  using InvokerType = FakeInvoker;
+struct FakeBindState2 : internal::BindStateBase {
+  FakeBindState2() : BindStateBase(&Destroy) {}
  private:
-  ~BindState() {}
-  static void Destroy(BindStateBase* self) {
-    delete static_cast<BindState*>(self);
+  ~FakeBindState2() {}
+  static void Destroy(internal::BindStateBase* self) {
+    delete static_cast<FakeBindState2*>(self);
   }
 };
-}  // namespace internal
 
 namespace {
 
-using FakeBindState1 = internal::BindState<void(), void(), FakeInvoker>;
-using FakeBindState2 =
-    internal::BindState<void(), void(), FakeInvoker, FakeInvoker>;
-
 class CallbackTest : public ::testing::Test {
  public:
   CallbackTest()
-      : callback_a_(new FakeBindState1()),
-        callback_b_(new FakeBindState2()) {
+      : callback_a_(new FakeBindState1(), &NopInvokeFunc),
+        callback_b_(new FakeBindState2(), &NopInvokeFunc) {
   }
 
   ~CallbackTest() override {}
@@ -111,7 +88,7 @@
   EXPECT_FALSE(callback_b_.Equals(callback_a_));
 
   // We should compare based on instance, not type.
-  Callback<void()> callback_c(new FakeBindState1());
+  Callback<void()> callback_c(new FakeBindState1(), &NopInvokeFunc);
   Callback<void()> callback_a2 = callback_a_;
   EXPECT_TRUE(callback_a_.Equals(callback_a2));
   EXPECT_FALSE(callback_a_.Equals(callback_c));
diff --git a/base/cancelable_callback.h b/base/cancelable_callback.h
index 47dfb2d..0034fdd 100644
--- a/base/cancelable_callback.h
+++ b/base/cancelable_callback.h
@@ -42,6 +42,8 @@
 #ifndef BASE_CANCELABLE_CALLBACK_H_
 #define BASE_CANCELABLE_CALLBACK_H_
 
+#include <utility>
+
 #include "base/base_export.h"
 #include "base/bind.h"
 #include "base/callback.h"
@@ -103,7 +105,7 @@
 
  private:
   void Forward(A... args) const {
-    callback_.Run(args...);
+    callback_.Run(std::forward<A>(args)...);
   }
 
   // Helper method to bind |forwarder_| using a weak pointer from
diff --git a/base/cancelable_callback_unittest.cc b/base/cancelable_callback_unittest.cc
index 6d0a114..23b6c1c 100644
--- a/base/cancelable_callback_unittest.cc
+++ b/base/cancelable_callback_unittest.cc
@@ -4,13 +4,16 @@
 
 #include "base/cancelable_callback.h"
 
+#include <memory>
+
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/location.h"
+#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted.h"
 #include "base/run_loop.h"
 #include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -26,6 +29,10 @@
 void IncrementBy(int* count, int n) { (*count) += n; }
 void RefCountedParam(const scoped_refptr<TestRefCounted>& ref_counted) {}
 
+void OnMoveOnlyReceived(int* value, std::unique_ptr<int> result) {
+  *value = *result;
+}
+
 // Cancel().
 //  - Callback can be run multiple times.
 //  - After Cancel(), Run() completes but has no effect.
@@ -182,5 +189,17 @@
   EXPECT_EQ(1, count);
 }
 
+// CancelableCallback can be used with move-only types.
+TEST(CancelableCallbackTest, MoveOnlyType) {
+  const int kExpectedResult = 42;
+
+  int result = 0;
+  CancelableCallback<void(std::unique_ptr<int>)> cb(
+      base::Bind(&OnMoveOnlyReceived, base::Unretained(&result)));
+  cb.callback().Run(base::WrapUnique(new int(kExpectedResult)));
+
+  EXPECT_EQ(kExpectedResult, result);
+}
+
 }  // namespace
 }  // namespace base
diff --git a/base/command_line.cc b/base/command_line.cc
index 40b65b9..099bb18 100644
--- a/base/command_line.cc
+++ b/base/command_line.cc
@@ -149,10 +149,7 @@
 
 }  // namespace
 
-CommandLine::CommandLine(NoProgram /* no_program */)
-    : argv_(1),
-      begin_args_(1) {
-}
+CommandLine::CommandLine(NoProgram) : argv_(1), begin_args_(1) {}
 
 CommandLine::CommandLine(const FilePath& program)
     : argv_(1),
@@ -197,6 +194,17 @@
   DCHECK_EQ(wcscmp(kSwitchPrefixes[arraysize(kSwitchPrefixes) - 1], L"/"), 0);
   switch_prefix_count = arraysize(kSwitchPrefixes) - 1;
 }
+
+// static
+void CommandLine::InitUsingArgvForTesting(int argc, const char* const* argv) {
+  DCHECK(!current_process_commandline_);
+  current_process_commandline_ = new CommandLine(NO_PROGRAM);
+  // On Windows we need to convert the command line arguments to string16.
+  base::CommandLine::StringVector argv_vector;
+  for (int i = 0; i < argc; ++i)
+    argv_vector.push_back(UTF8ToUTF16(argv[i]));
+  current_process_commandline_->InitFromArgv(argv_vector);
+}
 #endif
 
 // static
@@ -443,7 +451,10 @@
 }
 
 CommandLine::StringType CommandLine::GetArgumentsStringInternal(
-    bool /* quote_placeholders */) const {
+    bool quote_placeholders) const {
+#if !defined(OS_WIN)
+  (void)quote_placeholders;  // Avoid an unused warning.
+#endif
   StringType params;
   // Append switches and arguments.
   bool parse_switches = true;
diff --git a/base/command_line.h b/base/command_line.h
index 3de8873..3d29f8f 100644
--- a/base/command_line.h
+++ b/base/command_line.h
@@ -33,15 +33,15 @@
  public:
 #if defined(OS_WIN)
   // The native command line string type.
-  typedef base::string16 StringType;
+  using StringType = string16;
 #elif defined(OS_POSIX)
-  typedef std::string StringType;
+  using StringType = std::string;
 #endif
 
-  typedef StringType::value_type CharType;
-  typedef std::vector<StringType> StringVector;
-  typedef std::map<std::string, StringType> SwitchMap;
-  typedef std::map<base::StringPiece, const StringType*> StringPieceSwitchMap;
+  using CharType = StringType::value_type;
+  using StringVector = std::vector<StringType>;
+  using SwitchMap = std::map<std::string, StringType>;
+  using StringPieceSwitchMap = std::map<StringPiece, const StringType*>;
 
   // A constructor for CommandLines that only carry switches and arguments.
   enum NoProgram { NO_PROGRAM };
@@ -69,6 +69,13 @@
   // object and the behavior will be the same as Posix systems (only hyphens
   // begin switches, everything else will be an arg).
   static void set_slash_is_not_a_switch();
+
+  // Normally when the CommandLine singleton is initialized it gets the command
+  // line via the GetCommandLineW API and then uses the shell32 API
+  // CommandLineToArgvW to parse the command line and convert it back to
+  // argc and argv. Tests who don't want this dependency on shell32 and need
+  // to honor the arguments passed in should use this function.
+  static void InitUsingArgvForTesting(int argc, const char* const* argv);
 #endif
 
   // Initialize the current process CommandLine singleton. On Windows, ignores
@@ -83,6 +90,7 @@
   // you want to reset the base library to its initial state (for example, in an
   // outer library that needs to be able to terminate, and be re-initialized).
   // If Init is called only once, as in main(), Reset() is not necessary.
+  // Do not call this in tests. Use base::test::ScopedCommandLine instead.
   static void Reset();
 
   // Get the singleton CommandLine representing the current process's
@@ -94,7 +102,7 @@
   static bool InitializedForCurrentProcess();
 
 #if defined(OS_WIN)
-  static CommandLine FromString(const base::string16& command_line);
+  static CommandLine FromString(const string16& command_line);
 #endif
 
   // Initialize from an argv vector.
@@ -152,15 +160,15 @@
   // The second override provides an optimized version to avoid inlining codegen
   // at every callsite to find the length of the constant and construct a
   // StringPiece.
-  bool HasSwitch(const base::StringPiece& switch_string) const;
+  bool HasSwitch(const StringPiece& switch_string) const;
   bool HasSwitch(const char switch_constant[]) const;
 
   // Returns the value associated with the given switch. If the switch has no
   // value or isn't present, this method returns the empty string.
   // Switch names must be lowercase.
-  std::string GetSwitchValueASCII(const base::StringPiece& switch_string) const;
-  FilePath GetSwitchValuePath(const base::StringPiece& switch_string) const;
-  StringType GetSwitchValueNative(const base::StringPiece& switch_string) const;
+  std::string GetSwitchValueASCII(const StringPiece& switch_string) const;
+  FilePath GetSwitchValuePath(const StringPiece& switch_string) const;
+  StringType GetSwitchValueNative(const StringPiece& switch_string) const;
 
   // Get a copy of all switches, along with their values.
   const SwitchMap& GetSwitches() const { return switches_; }
@@ -203,7 +211,7 @@
 #if defined(OS_WIN)
   // Initialize by parsing the given command line string.
   // The program name is assumed to be the first item in the string.
-  void ParseFromString(const base::string16& command_line);
+  void ParseFromString(const string16& command_line);
 #endif
 
  private:
diff --git a/base/command_line_unittest.cc b/base/command_line_unittest.cc
index 967ce1c..bcfc6c5 100644
--- a/base/command_line_unittest.cc
+++ b/base/command_line_unittest.cc
@@ -2,13 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/command_line.h"
+
+#include <memory>
 #include <string>
 #include <vector>
 
-#include "base/command_line.h"
 #include "base/files/file_path.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/utf_string_conversions.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -390,7 +391,8 @@
 
 // Test that copies of CommandLine have a valid StringPiece map.
 TEST(CommandLineTest, Copy) {
-  scoped_ptr<CommandLine> initial(new CommandLine(CommandLine::NO_PROGRAM));
+  std::unique_ptr<CommandLine> initial(
+      new CommandLine(CommandLine::NO_PROGRAM));
   initial->AppendSwitch("a");
   initial->AppendSwitch("bbbbbbbbbbbbbbb");
   initial->AppendSwitch("c");
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
index 339e9b7..c2a02de 100644
--- a/base/compiler_specific.h
+++ b/base/compiler_specific.h
@@ -7,6 +7,11 @@
 
 #include "build/build_config.h"
 
+#if defined(ANDROID)
+// Prefer Android's libbase definitions to our own.
+#include <android-base/macros.h>
+#endif  // defined(ANDROID)
+
 #if defined(COMPILER_MSVC)
 
 // For _Printf_format_string_.
@@ -187,4 +192,12 @@
 #endif  // defined(COMPILER_GCC)
 #endif  // !defined(UNLIKELY)
 
+// Compiler feature-detection.
+// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
+#if defined(__has_feature)
+#define HAS_FEATURE(FEATURE) __has_feature(FEATURE)
+#else
+#define HAS_FEATURE(FEATURE) 0
+#endif
+
 #endif  // BASE_COMPILER_SPECIFIC_H_
diff --git a/base/containers/hash_tables.h b/base/containers/hash_tables.h
index c421ddd..8da7b67 100644
--- a/base/containers/hash_tables.h
+++ b/base/containers/hash_tables.h
@@ -1,281 +1,75 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-//
-
-//
-// Deal with the differences between Microsoft and GNU implemenations
-// of hash_map. Allows all platforms to use |base::hash_map| and
-// |base::hash_set|.
-//  eg:
-//   base::hash_map<int> my_map;
-//   base::hash_set<int> my_set;
-//
-// NOTE: It is an explicit non-goal of this class to provide a generic hash
-// function for pointers.  If you want to hash a pointers to a particular class,
-// please define the template specialization elsewhere (for example, in its
-// header file) and keep it specific to just pointers to that class.  This is
-// because identity hashes are not desirable for all types that might show up
-// in containers as pointers.
 
 #ifndef BASE_CONTAINERS_HASH_TABLES_H_
 #define BASE_CONTAINERS_HASH_TABLES_H_
 
-#include <stddef.h>
-#include <stdint.h>
-
-#include <utility>
-
-#include "base/strings/string16.h"
-#include "build/build_config.h"
-
-#if defined(COMPILER_MSVC)
+#include <cstddef>
 #include <unordered_map>
 #include <unordered_set>
+#include <utility>
 
-#define BASE_HASH_NAMESPACE std
+#include "base/hash.h"
 
-#elif defined(COMPILER_GCC)
+// This header file is deprecated. Use the corresponding C++11 type
+// instead. https://crbug.com/576864
 
+// Use a custom hasher instead.
 #define BASE_HASH_NAMESPACE base_hash
 
-// This is a hack to disable the gcc 4.4 warning about hash_map and hash_set
-// being deprecated.  We can get rid of this when we upgrade to VS2008 and we
-// can use <tr1/unordered_map> and <tr1/unordered_set>.
-#ifdef __DEPRECATED
-#define CHROME_OLD__DEPRECATED __DEPRECATED
-#undef __DEPRECATED
-#endif
-
-#include <ext/hash_map>
-#include <ext/hash_set>
-#define BASE_HASH_IMPL_NAMESPACE __gnu_cxx
-
-#include <string>
-
-#ifdef CHROME_OLD__DEPRECATED
-#define __DEPRECATED CHROME_OLD__DEPRECATED
-#undef CHROME_OLD__DEPRECATED
-#endif
-
 namespace BASE_HASH_NAMESPACE {
 
-// The pre-standard hash behaves like C++11's std::hash, except around pointers.
-// const char* is specialized to hash the C string and hash functions for
-// general T* are missing. Define a BASE_HASH_NAMESPACE::hash which aligns with
-// the C++11 behavior.
-
+// A separate hasher which, by default, forwards to std::hash. This is so legacy
+// uses of BASE_HASH_NAMESPACE with base::hash_map do not interfere with
+// std::hash mid-transition.
 template<typename T>
 struct hash {
-  std::size_t operator()(const T& value) const {
-    return BASE_HASH_IMPL_NAMESPACE::hash<T>()(value);
-  }
+  std::size_t operator()(const T& value) const { return std::hash<T>()(value); }
 };
 
-template<typename T>
-struct hash<T*> {
-  std::size_t operator()(T* value) const {
-    return BASE_HASH_IMPL_NAMESPACE::hash<uintptr_t>()(
-        reinterpret_cast<uintptr_t>(value));
+// Use base::IntPairHash from base/hash.h as a custom hasher instead.
+template <typename Type1, typename Type2>
+struct hash<std::pair<Type1, Type2>> {
+  std::size_t operator()(std::pair<Type1, Type2> value) const {
+    return base::HashInts(value.first, value.second);
   }
 };
 
-// The GNU C++ library provides identity hash functions for many integral types,
-// but not for |long long|.  This hash function will truncate if |size_t| is
-// narrower than |long long|.  This is probably good enough for what we will
-// use it for.
-
-#define DEFINE_TRIVIAL_HASH(integral_type) \
-    template<> \
-    struct hash<integral_type> { \
-      std::size_t operator()(integral_type value) const { \
-        return static_cast<std::size_t>(value); \
-      } \
-    }
-
-DEFINE_TRIVIAL_HASH(long long);
-DEFINE_TRIVIAL_HASH(unsigned long long);
-
-#undef DEFINE_TRIVIAL_HASH
-
-// Implement string hash functions so that strings of various flavors can
-// be used as keys in STL maps and sets.  The hash algorithm comes from the
-// GNU C++ library, in <tr1/functional>.  It is duplicated here because GCC
-// versions prior to 4.3.2 are unable to compile <tr1/functional> when RTTI
-// is disabled, as it is in our build.
-
-#define DEFINE_STRING_HASH(string_type) \
-    template<> \
-    struct hash<string_type> { \
-      std::size_t operator()(const string_type& s) const { \
-        std::size_t result = 0; \
-        for (string_type::const_iterator i = s.begin(); i != s.end(); ++i) \
-          result = (result * 131) + *i; \
-        return result; \
-      } \
-    }
-
-DEFINE_STRING_HASH(std::string);
-DEFINE_STRING_HASH(base::string16);
-
-#undef DEFINE_STRING_HASH
-
 }  // namespace BASE_HASH_NAMESPACE
 
-#else  // COMPILER
-#error define BASE_HASH_NAMESPACE for your compiler
-#endif  // COMPILER
-
 namespace base {
 
-// On MSVC, use the C++11 containers.
-#if defined(COMPILER_MSVC)
-
-template<class Key, class T,
-         class Hash = std::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<std::pair<const Key, T>>>
+// Use std::unordered_map instead.
+template <class Key,
+          class T,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<std::pair<const Key, T>>>
 using hash_map = std::unordered_map<Key, T, Hash, Pred, Alloc>;
 
-template<class Key, class T,
-         class Hash = std::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<std::pair<const Key, T>>>
+// Use std::unordered_multimap instead.
+template <class Key,
+          class T,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<std::pair<const Key, T>>>
 using hash_multimap = std::unordered_multimap<Key, T, Hash, Pred, Alloc>;
 
-template<class Key,
-         class Hash = std::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<Key>>
+// Use std::unordered_multiset instead.
+template <class Key,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<Key>>
 using hash_multiset = std::unordered_multiset<Key, Hash, Pred, Alloc>;
 
-template<class Key,
-         class Hash = std::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<Key>>
+// Use std::unordered_set instead.
+template <class Key,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<Key>>
 using hash_set = std::unordered_set<Key, Hash, Pred, Alloc>;
 
-#else  // !COMPILER_MSVC
-
-// Otherwise, use the pre-standard ones, but override the default hash to match
-// C++11.
-template<class Key, class T,
-         class Hash = BASE_HASH_NAMESPACE::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<std::pair<const Key, T>>>
-using hash_map = BASE_HASH_IMPL_NAMESPACE::hash_map<Key, T, Hash, Pred, Alloc>;
-
-template<class Key, class T,
-         class Hash = BASE_HASH_NAMESPACE::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<std::pair<const Key, T>>>
-using hash_multimap =
-    BASE_HASH_IMPL_NAMESPACE::hash_multimap<Key, T, Hash, Pred, Alloc>;
-
-template<class Key,
-         class Hash = BASE_HASH_NAMESPACE::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<Key>>
-using hash_multiset =
-    BASE_HASH_IMPL_NAMESPACE::hash_multiset<Key, Hash, Pred, Alloc>;
-
-template<class Key,
-         class Hash = BASE_HASH_NAMESPACE::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<Key>>
-using hash_set = BASE_HASH_IMPL_NAMESPACE::hash_set<Key, Hash, Pred, Alloc>;
-
-#undef BASE_HASH_IMPL_NAMESPACE
-
-#endif  // COMPILER_MSVC
-
-// Implement hashing for pairs of at-most 32 bit integer values.
-// When size_t is 32 bits, we turn the 64-bit hash code into 32 bits by using
-// multiply-add hashing. This algorithm, as described in
-// Theorem 4.3.3 of the thesis "Über die Komplexität der Multiplikation in
-// eingeschränkten Branchingprogrammmodellen" by Woelfel, is:
-//
-//   h32(x32, y32) = (h64(x32, y32) * rand_odd64 + rand16 * 2^16) % 2^64 / 2^32
-//
-// Contact danakj@chromium.org for any questions.
-inline std::size_t HashInts32(uint32_t value1, uint32_t value2) {
-  uint64_t value1_64 = value1;
-  uint64_t hash64 = (value1_64 << 32) | value2;
-
-  if (sizeof(std::size_t) >= sizeof(uint64_t))
-    return static_cast<std::size_t>(hash64);
-
-  uint64_t odd_random = 481046412LL << 32 | 1025306955LL;
-  uint32_t shift_random = 10121U << 16;
-
-  hash64 = hash64 * odd_random + shift_random;
-  std::size_t high_bits = static_cast<std::size_t>(
-      hash64 >> (8 * (sizeof(uint64_t) - sizeof(std::size_t))));
-  return high_bits;
-}
-
-// Implement hashing for pairs of up-to 64-bit integer values.
-// We use the compound integer hash method to produce a 64-bit hash code, by
-// breaking the two 64-bit inputs into 4 32-bit values:
-// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
-// Then we reduce our result to 32 bits if required, similar to above.
-inline std::size_t HashInts64(uint64_t value1, uint64_t value2) {
-  uint32_t short_random1 = 842304669U;
-  uint32_t short_random2 = 619063811U;
-  uint32_t short_random3 = 937041849U;
-  uint32_t short_random4 = 3309708029U;
-
-  uint32_t value1a = static_cast<uint32_t>(value1 & 0xffffffff);
-  uint32_t value1b = static_cast<uint32_t>((value1 >> 32) & 0xffffffff);
-  uint32_t value2a = static_cast<uint32_t>(value2 & 0xffffffff);
-  uint32_t value2b = static_cast<uint32_t>((value2 >> 32) & 0xffffffff);
-
-  uint64_t product1 = static_cast<uint64_t>(value1a) * short_random1;
-  uint64_t product2 = static_cast<uint64_t>(value1b) * short_random2;
-  uint64_t product3 = static_cast<uint64_t>(value2a) * short_random3;
-  uint64_t product4 = static_cast<uint64_t>(value2b) * short_random4;
-
-  uint64_t hash64 = product1 + product2 + product3 + product4;
-
-  if (sizeof(std::size_t) >= sizeof(uint64_t))
-    return static_cast<std::size_t>(hash64);
-
-  uint64_t odd_random = 1578233944LL << 32 | 194370989LL;
-  uint32_t shift_random = 20591U << 16;
-
-  hash64 = hash64 * odd_random + shift_random;
-  std::size_t high_bits = static_cast<std::size_t>(
-      hash64 >> (8 * (sizeof(uint64_t) - sizeof(std::size_t))));
-  return high_bits;
-}
-
-template<typename T1, typename T2>
-inline std::size_t HashPair(T1 value1, T2 value2) {
-  // This condition is expected to be compile-time evaluated and optimised away
-  // in release builds.
-  if (sizeof(T1) > sizeof(uint32_t) || (sizeof(T2) > sizeof(uint32_t)))
-    return HashInts64(value1, value2);
-
-  return HashInts32(value1, value2);
-}
-
 }  // namespace base
 
-namespace BASE_HASH_NAMESPACE {
-
-// Implement methods for hashing a pair of integers, so they can be used as
-// keys in STL containers.
-
-template<typename Type1, typename Type2>
-struct hash<std::pair<Type1, Type2> > {
-  std::size_t operator()(std::pair<Type1, Type2> value) const {
-    return base::HashPair(value.first, value.second);
-  }
-};
-
-}  // namespace BASE_HASH_NAMESPACE
-
-#undef DEFINE_PAIR_HASH_FUNCTION_START
-#undef DEFINE_PAIR_HASH_FUNCTION_END
-
 #endif  // BASE_CONTAINERS_HASH_TABLES_H_
diff --git a/base/containers/mru_cache.h b/base/containers/mru_cache.h
index 272a773..6c1d626 100644
--- a/base/containers/mru_cache.h
+++ b/base/containers/mru_cache.h
@@ -19,11 +19,12 @@
 #include <stddef.h>
 
 #include <algorithm>
+#include <functional>
 #include <list>
 #include <map>
+#include <unordered_map>
 #include <utility>
 
-#include "base/containers/hash_tables.h"
 #include "base/logging.h"
 #include "base/macros.h"
 
@@ -34,16 +35,17 @@
 // This template is used to standardize map type containers that can be used
 // by MRUCacheBase. This level of indirection is necessary because of the way
 // that template template params and default template params interact.
-template <class KeyType, class ValueType>
+template <class KeyType, class ValueType, class CompareType>
 struct MRUCacheStandardMap {
-  typedef std::map<KeyType, ValueType> Type;
+  typedef std::map<KeyType, ValueType, CompareType> Type;
 };
 
 // Base class for the MRU cache specializations defined below.
-// The deletor will get called on all payloads that are being removed or
-// replaced.
-template <class KeyType, class PayloadType, class DeletorType,
-          template <typename, typename> class MapType = MRUCacheStandardMap>
+template <class KeyType,
+          class PayloadType,
+          class HashOrCompareType,
+          template <typename, typename, typename> class MapType =
+              MRUCacheStandardMap>
 class MRUCacheBase {
  public:
   // The payload of the list. This maintains a copy of the key so we can
@@ -53,7 +55,8 @@
  private:
   typedef std::list<value_type> PayloadList;
   typedef typename MapType<KeyType,
-                           typename PayloadList::iterator>::Type KeyIndex;
+                           typename PayloadList::iterator,
+                           HashOrCompareType>::Type KeyIndex;
 
  public:
   typedef typename PayloadList::size_type size_type;
@@ -69,18 +72,9 @@
   // a new item is inserted. If the caller wants to manager this itself (for
   // example, maybe it has special work to do when something is evicted), it
   // can pass NO_AUTO_EVICT to not restrict the cache size.
-  explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {
-  }
+  explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
 
-  MRUCacheBase(size_type max_size, const DeletorType& deletor)
-      : max_size_(max_size), deletor_(deletor) {
-  }
-
-  virtual ~MRUCacheBase() {
-    iterator i = begin();
-    while (i != end())
-      i = Erase(i);
-  }
+  virtual ~MRUCacheBase() {}
 
   size_type max_size() const { return max_size_; }
 
@@ -88,14 +82,14 @@
   // the same key, it is removed prior to insertion. An iterator indicating the
   // inserted item will be returned (this will always be the front of the list).
   //
-  // The payload will be copied. In the case of an OwningMRUCache, this function
-  // will take ownership of the pointer.
-  iterator Put(const KeyType& key, const PayloadType& payload) {
+  // The payload will be forwarded.
+  template <typename Payload>
+  iterator Put(const KeyType& key, Payload&& payload) {
     // Remove any existing payload with that key.
     typename KeyIndex::iterator index_iter = index_.find(key);
     if (index_iter != index_.end()) {
-      // Erase the reference to it. This will call the deletor on the removed
-      // element. The index reference will be replaced in the code below.
+      // Erase the reference to it. The index reference will be replaced in the
+      // code below.
       Erase(index_iter->second);
     } else if (max_size_ != NO_AUTO_EVICT) {
       // New item is being inserted which might make it larger than the maximum
@@ -103,7 +97,7 @@
       ShrinkToSize(max_size_ - 1);
     }
 
-    ordering_.push_front(value_type(key, payload));
+    ordering_.push_front(value_type(key, std::forward<Payload>(payload)));
     index_.insert(std::make_pair(key, ordering_.begin()));
     return ordering_.begin();
   }
@@ -144,14 +138,12 @@
   void Swap(MRUCacheBase& other) {
     ordering_.swap(other.ordering_);
     index_.swap(other.index_);
-    std::swap(deletor_, other.deletor_);
     std::swap(max_size_, other.max_size_);
   }
 
   // Erases the item referenced by the given iterator. An iterator to the item
   // following it will be returned. The iterator must be valid.
   iterator Erase(iterator pos) {
-    deletor_(pos->second);
     index_.erase(pos->first);
     return ordering_.erase(pos);
   }
@@ -174,9 +166,6 @@
 
   // Deletes everything from the cache.
   void Clear() {
-    for (typename PayloadList::iterator i(ordering_.begin());
-         i != ordering_.end(); ++i)
-      deletor_(i->second);
     index_.clear();
     ordering_.clear();
   }
@@ -213,101 +202,50 @@
 
   size_type max_size_;
 
-  DeletorType deletor_;
-
   DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
 };
 
 // MRUCache --------------------------------------------------------------------
 
-// A functor that does nothing. Used by the MRUCache.
-template<class PayloadType>
-class MRUCacheNullDeletor {
- public:
-  void operator()(const PayloadType& payload) {}
-};
-
 // A container that does not do anything to free its data. Use this when storing
 // value types (as opposed to pointers) in the list.
 template <class KeyType, class PayloadType>
-class MRUCache : public MRUCacheBase<KeyType,
-                                     PayloadType,
-                                     MRUCacheNullDeletor<PayloadType> > {
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, std::less<KeyType>> {
  private:
-  typedef MRUCacheBase<KeyType, PayloadType,
-      MRUCacheNullDeletor<PayloadType> > ParentType;
+  using ParentType = MRUCacheBase<KeyType, PayloadType, std::less<KeyType>>;
 
  public:
   // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
   explicit MRUCache(typename ParentType::size_type max_size)
-      : ParentType(max_size) {
-  }
-  virtual ~MRUCache() {
-  }
+      : ParentType(max_size) {}
+  virtual ~MRUCache() {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(MRUCache);
 };
 
-// OwningMRUCache --------------------------------------------------------------
-
-template<class PayloadType>
-class MRUCachePointerDeletor {
- public:
-  void operator()(const PayloadType& payload) { delete payload; }
-};
-
-// A cache that owns the payload type, which must be a non-const pointer type.
-// The pointers will be deleted when they are removed, replaced, or when the
-// cache is destroyed.
-template <class KeyType, class PayloadType>
-class OwningMRUCache
-    : public MRUCacheBase<KeyType,
-                          PayloadType,
-                          MRUCachePointerDeletor<PayloadType> > {
- private:
-  typedef MRUCacheBase<KeyType, PayloadType,
-      MRUCachePointerDeletor<PayloadType> > ParentType;
-
- public:
-  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
-  explicit OwningMRUCache(typename ParentType::size_type max_size)
-      : ParentType(max_size) {
-  }
-  virtual ~OwningMRUCache() {
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(OwningMRUCache);
-};
-
 // HashingMRUCache ------------------------------------------------------------
 
-template <class KeyType, class ValueType>
+template <class KeyType, class ValueType, class HashType>
 struct MRUCacheHashMap {
-  typedef base::hash_map<KeyType, ValueType> Type;
+  typedef std::unordered_map<KeyType, ValueType, HashType> Type;
 };
 
-// This class is similar to MRUCache, except that it uses base::hash_map as
-// the map type instead of std::map. Note that your KeyType must be hashable
-// to use this cache.
-template <class KeyType, class PayloadType>
-class HashingMRUCache : public MRUCacheBase<KeyType,
-                                            PayloadType,
-                                            MRUCacheNullDeletor<PayloadType>,
-                                            MRUCacheHashMap> {
+// This class is similar to MRUCache, except that it uses std::unordered_map as
+// the map type instead of std::map. Note that your KeyType must be hashable to
+// use this cache or you need to provide a hashing class.
+template <class KeyType, class PayloadType, class HashType = std::hash<KeyType>>
+class HashingMRUCache
+    : public MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap> {
  private:
-  typedef MRUCacheBase<KeyType, PayloadType,
-                       MRUCacheNullDeletor<PayloadType>,
-                       MRUCacheHashMap> ParentType;
+  using ParentType =
+      MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>;
 
  public:
   // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
   explicit HashingMRUCache(typename ParentType::size_type max_size)
-      : ParentType(max_size) {
-  }
-  virtual ~HashingMRUCache() {
-  }
+      : ParentType(max_size) {}
+  virtual ~HashingMRUCache() {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
diff --git a/base/containers/scoped_ptr_hash_map.h b/base/containers/scoped_ptr_hash_map.h
index 189c314..f513f06 100644
--- a/base/containers/scoped_ptr_hash_map.h
+++ b/base/containers/scoped_ptr_hash_map.h
@@ -8,17 +8,19 @@
 #include <stddef.h>
 
 #include <algorithm>
+#include <memory>
 #include <utility>
 
 #include "base/containers/hash_tables.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/stl_util.h"
 
 namespace base {
 
-// This type acts like a hash_map<K, scoped_ptr<V, D> >, based on top of
+// Deprecated. Use std::unordered_map instead. https://crbug.com/579229
+//
+// This type acts like a hash_map<K, std::unique_ptr<V, D> >, based on top of
 // base::hash_map. The ScopedPtrHashMap has ownership of all values in the data
 // structure.
 template <typename Key, typename ScopedPtr>
diff --git a/base/containers/small_map.h b/base/containers/small_map.h
index 427736c..82ed6c5 100644
--- a/base/containers/small_map.h
+++ b/base/containers/small_map.h
@@ -517,7 +517,7 @@
       array_[i].Destroy();
       --size_;
       if (i != size_) {
-        array_[i].Init(*array_[size_]);
+        array_[i].InitFromMove(std::move(array_[size_]));
         array_[size_].Destroy();
       }
     } else {
@@ -594,7 +594,7 @@
     ManualConstructor<value_type> temp_array[kArraySize];
 
     for (int i = 0; i < kArraySize; i++) {
-      temp_array[i].Init(*array_[i]);
+      temp_array[i].InitFromMove(std::move(array_[i]));
       array_[i].Destroy();
     }
 
@@ -604,7 +604,7 @@
 
     // Insert elements into it.
     for (int i = 0; i < kArraySize; i++) {
-      map_->insert(*temp_array[i]);
+      map_->insert(std::move(*temp_array[i]));
       temp_array[i].Destroy();
     }
   }
diff --git a/base/cpu.cc b/base/cpu.cc
index 7135445..de4a001 100644
--- a/base/cpu.cc
+++ b/base/cpu.cc
@@ -7,13 +7,11 @@
 #include <limits.h>
 #include <stddef.h>
 #include <stdint.h>
-#include <stdlib.h>
 #include <string.h>
 
 #include <algorithm>
 
 #include "base/macros.h"
-#include "base/strings/string_piece.h"
 #include "build/build_config.h"
 
 #if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
@@ -49,7 +47,6 @@
     has_avx2_(false),
     has_aesni_(false),
     has_non_stop_time_stamp_counter_(false),
-    has_broken_neon_(false),
     cpu_vendor_("unknown") {
   Initialize();
 }
@@ -99,7 +96,7 @@
 #if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
 class LazyCpuInfoValue {
  public:
-  LazyCpuInfoValue() : has_broken_neon_(false) {
+  LazyCpuInfoValue() {
     // This function finds the value from /proc/cpuinfo under the key "model
     // name" or "Processor". "model name" is used in Linux 3.8 and later (3.7
     // and later for arm64) and is shown once per CPU. "Processor" is used in
@@ -108,21 +105,6 @@
     const char kModelNamePrefix[] = "model name\t: ";
     const char kProcessorPrefix[] = "Processor\t: ";
 
-    // This function also calculates whether we believe that this CPU has a
-    // broken NEON unit based on these fields from cpuinfo:
-    unsigned implementer = 0, architecture = 0, variant = 0, part = 0,
-             revision = 0;
-    const struct {
-      const char key[17];
-      unsigned int* result;
-    } kUnsignedValues[] = {
-      {"CPU implementer", &implementer},
-      {"CPU architecture", &architecture},
-      {"CPU variant", &variant},
-      {"CPU part", &part},
-      {"CPU revision", &revision},
-    };
-
     std::string contents;
     ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
     DCHECK(!contents.empty());
@@ -138,52 +120,13 @@
            line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)) {
         brand_.assign(line.substr(strlen(kModelNamePrefix)));
       }
-
-      for (size_t i = 0; i < arraysize(kUnsignedValues); i++) {
-        const char *key = kUnsignedValues[i].key;
-        const size_t len = strlen(key);
-
-        if (line.compare(0, len, key) == 0 &&
-            line.size() >= len + 1 &&
-            (line[len] == '\t' || line[len] == ' ' || line[len] == ':')) {
-          size_t colon_pos = line.find(':', len);
-          if (colon_pos == std::string::npos) {
-            continue;
-          }
-
-          const StringPiece line_sp(line);
-          StringPiece value_sp = line_sp.substr(colon_pos + 1);
-          while (!value_sp.empty() &&
-                 (value_sp[0] == ' ' || value_sp[0] == '\t')) {
-            value_sp = value_sp.substr(1);
-          }
-
-          // The string may have leading "0x" or not, so we use strtoul to
-          // handle that.
-          char* endptr;
-          std::string value(value_sp.as_string());
-          unsigned long int result = strtoul(value.c_str(), &endptr, 0);
-          if (*endptr == 0 && result <= UINT_MAX) {
-            *kUnsignedValues[i].result = result;
-          }
-        }
-      }
     }
-
-    has_broken_neon_ =
-      implementer == 0x51 &&
-      architecture == 7 &&
-      variant == 1 &&
-      part == 0x4d &&
-      revision == 0;
   }
 
   const std::string& brand() const { return brand_; }
-  bool has_broken_neon() const { return has_broken_neon_; }
 
  private:
   std::string brand_;
-  bool has_broken_neon_;
   DISALLOW_COPY_AND_ASSIGN(LazyCpuInfoValue);
 };
 
@@ -277,7 +220,6 @@
   }
 #elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
   cpu_brand_.assign(g_lazy_cpuinfo.Get().brand());
-  has_broken_neon_ = g_lazy_cpuinfo.Get().has_broken_neon();
 #endif
 }
 
diff --git a/base/cpu.h b/base/cpu.h
index 8c3c06c..0e4303b 100644
--- a/base/cpu.h
+++ b/base/cpu.h
@@ -52,10 +52,6 @@
   bool has_non_stop_time_stamp_counter() const {
     return has_non_stop_time_stamp_counter_;
   }
-  // has_broken_neon is only valid on ARM chips. If true, it indicates that we
-  // believe that the NEON unit on the current CPU is flawed and cannot execute
-  // some code. See https://code.google.com/p/chromium/issues/detail?id=341598
-  bool has_broken_neon() const { return has_broken_neon_; }
 
   IntelMicroArchitecture GetIntelMicroArchitecture() const;
   const std::string& cpu_brand() const { return cpu_brand_; }
@@ -82,7 +78,6 @@
   bool has_avx2_;
   bool has_aesni_;
   bool has_non_stop_time_stamp_counter_;
-  bool has_broken_neon_;
   std::string cpu_vendor_;
   std::string cpu_brand_;
 };
diff --git a/base/debug/OWNERS b/base/debug/OWNERS
deleted file mode 100644
index 4976ab1..0000000
--- a/base/debug/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-per-file trace_event*=nduca@chromium.org
-per-file trace_event*=dsinclair@chromium.org
-per-file trace_event_android.cc=wangxianzhu@chromium.org
diff --git a/base/debug/alias.cc b/base/debug/alias.cc
index d498084..ff35574 100644
--- a/base/debug/alias.cc
+++ b/base/debug/alias.cc
@@ -12,8 +12,7 @@
 #pragma optimize("", off)
 #endif
 
-void Alias(const void* /* var */) {
-}
+void Alias(const void*) {}
 
 #if defined(COMPILER_MSVC)
 #pragma optimize("", on)
diff --git a/base/debug/debugger_posix.cc b/base/debug/debugger_posix.cc
index d7e492b..a157d9a 100644
--- a/base/debug/debugger_posix.cc
+++ b/base/debug/debugger_posix.cc
@@ -3,8 +3,6 @@
 // found in the LICENSE file.
 
 #include "base/debug/debugger.h"
-#include "base/macros.h"
-#include "build/build_config.h"
 
 #include <errno.h>
 #include <fcntl.h>
@@ -16,8 +14,12 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <memory>
 #include <vector>
 
+#include "base/macros.h"
+#include "build/build_config.h"
+
 #if defined(__GLIBCXX__)
 #include <cxxabi.h>
 #endif
@@ -38,7 +40,6 @@
 
 #include "base/debug/alias.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/string_piece.h"
 
diff --git a/base/debug/debugging_flags.h b/base/debug/debugging_flags.h
new file mode 100644
index 0000000..1ea435f
--- /dev/null
+++ b/base/debug/debugging_flags.h
@@ -0,0 +1,11 @@
+// Generated by build/write_buildflag_header.py
+// From "base_debugging_flags"
+
+#ifndef BASE_DEBUG_DEBUGGING_FLAGS_H_
+#define BASE_DEBUG_DEBUGGING_FLAGS_H_
+
+#include "build/buildflag.h"
+
+#define BUILDFLAG_INTERNAL_ENABLE_PROFILING() (0)
+
+#endif  // BASE_DEBUG_DEBUGGING_FLAGS_H_
diff --git a/base/debug/leak_annotations.h b/base/debug/leak_annotations.h
new file mode 100644
index 0000000..dc50246
--- /dev/null
+++ b/base/debug/leak_annotations.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_LEAK_ANNOTATIONS_H_
+#define BASE_DEBUG_LEAK_ANNOTATIONS_H_
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+// This file defines macros which can be used to annotate intentional memory
+// leaks. Support for annotations is implemented in LeakSanitizer. Annotated
+// objects will be treated as a source of live pointers, i.e. any heap objects
+// reachable by following pointers from an annotated object will not be
+// reported as leaks.
+//
+// ANNOTATE_SCOPED_MEMORY_LEAK: all allocations made in the current scope
+// will be annotated as leaks.
+// ANNOTATE_LEAKING_OBJECT_PTR(X): the heap object referenced by pointer X will
+// be annotated as a leak.
+
+#if defined(LEAK_SANITIZER) && !defined(OS_NACL)
+
+#include <sanitizer/lsan_interface.h>
+
+class ScopedLeakSanitizerDisabler {
+ public:
+  ScopedLeakSanitizerDisabler() { __lsan_disable(); }
+  ~ScopedLeakSanitizerDisabler() { __lsan_enable(); }
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedLeakSanitizerDisabler);
+};
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK \
+    ScopedLeakSanitizerDisabler leak_sanitizer_disabler; static_cast<void>(0)
+
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) __lsan_ignore_object(X);
+
+#else
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK ((void)0)
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) ((void)0)
+
+#endif
+
+#endif  // BASE_DEBUG_LEAK_ANNOTATIONS_H_
diff --git a/base/debug/leak_tracker_unittest.cc b/base/debug/leak_tracker_unittest.cc
index 99df4c1..8b4c568 100644
--- a/base/debug/leak_tracker_unittest.cc
+++ b/base/debug/leak_tracker_unittest.cc
@@ -3,7 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/debug/leak_tracker.h"
-#include "base/memory/scoped_ptr.h"
+
+#include <memory>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -29,9 +31,9 @@
   EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
 
   // Use scoped_ptr so compiler doesn't complain about unused variables.
-  scoped_ptr<ClassA> a1(new ClassA);
-  scoped_ptr<ClassB> b1(new ClassB);
-  scoped_ptr<ClassB> b2(new ClassB);
+  std::unique_ptr<ClassA> a1(new ClassA);
+  std::unique_ptr<ClassB> b1(new ClassB);
+  std::unique_ptr<ClassB> b2(new ClassB);
 
   EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
   EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
@@ -52,7 +54,7 @@
     EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
     EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
 
-    scoped_ptr<ClassA> a2(new ClassA);
+    std::unique_ptr<ClassA> a2(new ClassA);
 
     EXPECT_EQ(2, LeakTracker<ClassA>::NumLiveInstances());
     EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
@@ -72,10 +74,10 @@
 TEST(LeakTrackerTest, LinkedList) {
   EXPECT_EQ(0, LeakTracker<ClassB>::NumLiveInstances());
 
-  scoped_ptr<ClassA> a1(new ClassA);
-  scoped_ptr<ClassA> a2(new ClassA);
-  scoped_ptr<ClassA> a3(new ClassA);
-  scoped_ptr<ClassA> a4(new ClassA);
+  std::unique_ptr<ClassA> a1(new ClassA);
+  std::unique_ptr<ClassA> a2(new ClassA);
+  std::unique_ptr<ClassA> a3(new ClassA);
+  std::unique_ptr<ClassA> a4(new ClassA);
 
   EXPECT_EQ(4, LeakTracker<ClassA>::NumLiveInstances());
 
@@ -88,7 +90,7 @@
   EXPECT_EQ(2, LeakTracker<ClassA>::NumLiveInstances());
 
   // Append to the new tail of the list (a3).
-  scoped_ptr<ClassA> a5(new ClassA);
+  std::unique_ptr<ClassA> a5(new ClassA);
   EXPECT_EQ(3, LeakTracker<ClassA>::NumLiveInstances());
 
   a2.reset();
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index 2250c8f..ac0ead7 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -11,6 +11,12 @@
 
 #include "base/macros.h"
 
+#if HAVE_TRACE_STACK_FRAME_POINTERS && defined(OS_ANDROID)
+#include <pthread.h>
+#include "base/process/process_handle.h"
+#include "base/threading/platform_thread.h"
+#endif
+
 namespace base {
 namespace debug {
 
@@ -39,5 +45,103 @@
   return stream.str();
 }
 
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+
+#if defined(OS_ANDROID)
+
+static uintptr_t GetStackEnd() {
+  // Bionic reads proc/maps on every call to pthread_getattr_np() when called
+  // from the main thread. So we need to cache end of stack in that case to get
+  // acceptable performance.
+  // For all other threads pthread_getattr_np() is fast enough as it just reads
+  // values from its pthread_t argument.
+  static uintptr_t main_stack_end = 0;
+
+  bool is_main_thread = GetCurrentProcId() == PlatformThread::CurrentId();
+
+  if (is_main_thread && main_stack_end) {
+    return main_stack_end;
+  }
+
+  uintptr_t stack_begin = 0;
+  size_t stack_size = 0;
+  pthread_attr_t attributes;
+  int error = pthread_getattr_np(pthread_self(), &attributes);
+  if (!error) {
+    error = pthread_attr_getstack(
+        &attributes,
+        reinterpret_cast<void**>(&stack_begin),
+        &stack_size);
+    pthread_attr_destroy(&attributes);
+  }
+  DCHECK(!error);
+
+  uintptr_t stack_end = stack_begin + stack_size;
+  if (is_main_thread) {
+    main_stack_end = stack_end;
+  }
+  return stack_end;
+}
+
+#endif  // defined(OS_ANDROID)
+
+size_t TraceStackFramePointers(const void** out_trace,
+                               size_t max_depth,
+                               size_t skip_initial) {
+  // Usage of __builtin_frame_address() enables frame pointers in this
+  // function even if they are not enabled globally. So 'sp' will always
+  // be valid.
+  uintptr_t sp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+
+#if defined(OS_ANDROID)
+  uintptr_t stack_end = GetStackEnd();
+#endif
+
+  size_t depth = 0;
+  while (depth < max_depth) {
+#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
+    // GCC and LLVM generate slightly different frames on ARM, see
+    // https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
+    // x86-compatible frame, while GCC needs adjustment.
+    sp -= sizeof(uintptr_t);
+#endif
+
+#if defined(OS_ANDROID)
+    // Both sp[0] and s[1] must be valid.
+    if (sp + 2 * sizeof(uintptr_t) > stack_end) {
+      break;
+    }
+#endif
+
+    if (skip_initial != 0) {
+      skip_initial--;
+    } else {
+      out_trace[depth++] = reinterpret_cast<const void**>(sp)[1];
+    }
+
+    // Find out next frame pointer
+    // (heuristics are from TCMalloc's stacktrace functions)
+    {
+      uintptr_t next_sp = reinterpret_cast<const uintptr_t*>(sp)[0];
+
+      // With the stack growing downwards, older stack frame must be
+      // at a greater address that the current one.
+      if (next_sp <= sp) break;
+
+      // Assume stack frames larger than 100,000 bytes are bogus.
+      if (next_sp - sp > 100000) break;
+
+      // Check alignment.
+      if (sp & (sizeof(void*) - 1)) break;
+
+      sp = next_sp;
+    }
+  }
+
+  return depth;
+}
+
+#endif  // HAVE_TRACE_STACK_FRAME_POINTERS
+
 }  // namespace debug
 }  // namespace base
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index 07e119a..23e7b51 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -22,6 +22,14 @@
 struct _CONTEXT;
 #endif
 
+#if defined(OS_POSIX) && ( \
+    defined(__i386__) || defined(__x86_64__) || \
+    (defined(__arm__) && !defined(__thumb__)))
+#define HAVE_TRACE_STACK_FRAME_POINTERS 1
+#else
+#define HAVE_TRACE_STACK_FRAME_POINTERS 0
+#endif
+
 namespace base {
 namespace debug {
 
@@ -93,6 +101,20 @@
   size_t count_;
 };
 
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+// Traces the stack by using frame pointers. This function is faster but less
+// reliable than StackTrace. It should work for debug and profiling builds,
+// but not for release builds (although there are some exceptions).
+//
+// Writes at most |max_depth| frames (instruction pointers) into |out_trace|
+// after skipping |skip_initial| frames. Note that the function itself is not
+// added to the trace so |skip_initial| should be 0 in most cases.
+// Returns number of frames written.
+BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
+                                           size_t max_depth,
+                                           size_t skip_initial);
+#endif  // HAVE_TRACE_STACK_FRAME_POINTERS
+
 namespace internal {
 
 #if defined(OS_POSIX) && !defined(OS_ANDROID)
diff --git a/base/debug/stack_trace_posix.cc b/base/debug/stack_trace_posix.cc
index 98e6c2e..3c0299c 100644
--- a/base/debug/stack_trace_posix.cc
+++ b/base/debug/stack_trace_posix.cc
@@ -17,6 +17,7 @@
 #include <unistd.h>
 
 #include <map>
+#include <memory>
 #include <ostream>
 #include <string>
 #include <vector>
@@ -36,7 +37,7 @@
 #include "base/debug/proc_maps_linux.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/free_deleter.h"
 #include "base/memory/singleton.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/posix/eintr_wrapper.h"
@@ -95,7 +96,7 @@
 
     // Try to demangle the mangled symbol candidate.
     int status = 0;
-    scoped_ptr<char, base::FreeDeleter> demangled_symbol(
+    std::unique_ptr<char, base::FreeDeleter> demangled_symbol(
         abi::__cxa_demangle(mangled_symbol.c_str(), NULL, 0, &status));
     if (status == 0) {  // Demangling is successful.
       // Remove the mangled symbol.
@@ -177,14 +178,14 @@
 
     handler->HandleOutput("\n");
   }
-#elif !defined(__UCLIBC__)
+#else
   bool printed = false;
 
   // Below part is async-signal unsafe (uses malloc), so execute it only
   // when we are not executing the signal handler.
   if (in_signal_handler == 0) {
-    scoped_ptr<char*, FreeDeleter>
-        trace_symbols(backtrace_symbols(trace, size));
+    std::unique_ptr<char*, FreeDeleter> trace_symbols(
+        backtrace_symbols(trace, size));
     if (trace_symbols.get()) {
       for (size_t i = 0; i < size; ++i) {
         std::string trace_symbol = trace_symbols.get()[i];
@@ -214,9 +215,7 @@
   ignore_result(HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output))));
 }
 
-void StackDumpSignalHandler(int signal,
-                            siginfo_t* info,
-                            void* void_context) {
+void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
   (void)void_context;  // unused depending on build context
   // NOTE: This code MUST be async-signal safe.
   // NO malloc or stdio is allowed here.
diff --git a/base/debug/task_annotator.h b/base/debug/task_annotator.h
index 443c71b..2687c5c 100644
--- a/base/debug/task_annotator.h
+++ b/base/debug/task_annotator.h
@@ -39,11 +39,6 @@
   DISALLOW_COPY_AND_ASSIGN(TaskAnnotator);
 };
 
-#define TRACE_TASK_EXECUTION(run_function, task)           \
-  TRACE_EVENT2("toplevel", (run_function), "src_file",     \
-               (task).posted_from.file_name(), "src_func", \
-               (task).posted_from.function_name());
-
 }  // namespace debug
 }  // namespace base
 
diff --git a/base/environment.cc b/base/environment.cc
index adb7387..534a7a8 100644
--- a/base/environment.cc
+++ b/base/environment.cc
@@ -8,6 +8,7 @@
 
 #include <vector>
 
+#include "base/memory/ptr_util.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
 #include "base/strings/utf_string_conversions.h"
@@ -25,7 +26,7 @@
 
 class EnvironmentImpl : public Environment {
  public:
-  bool GetVar(const char* variable_name, std::string* result) override {
+  bool GetVar(StringPiece variable_name, std::string* result) override {
     if (GetVarImpl(variable_name, result))
       return true;
 
@@ -35,28 +36,28 @@
     // I.e. HTTP_PROXY may be http_proxy for some users/systems.
     char first_char = variable_name[0];
     std::string alternate_case_var;
-    if (first_char >= 'a' && first_char <= 'z')
+    if (IsAsciiLower(first_char))
       alternate_case_var = ToUpperASCII(variable_name);
-    else if (first_char >= 'A' && first_char <= 'Z')
+    else if (IsAsciiUpper(first_char))
       alternate_case_var = ToLowerASCII(variable_name);
     else
       return false;
     return GetVarImpl(alternate_case_var.c_str(), result);
   }
 
-  bool SetVar(const char* variable_name,
+  bool SetVar(StringPiece variable_name,
               const std::string& new_value) override {
     return SetVarImpl(variable_name, new_value);
   }
 
-  bool UnSetVar(const char* variable_name) override {
+  bool UnSetVar(StringPiece variable_name) override {
     return UnSetVarImpl(variable_name);
   }
 
  private:
-  bool GetVarImpl(const char* variable_name, std::string* result) {
+  bool GetVarImpl(StringPiece variable_name, std::string* result) {
 #if defined(OS_POSIX)
-    const char* env_value = getenv(variable_name);
+    const char* env_value = getenv(variable_name.data());
     if (!env_value)
       return false;
     // Note that the variable may be defined but empty.
@@ -64,12 +65,12 @@
       *result = env_value;
     return true;
 #elif defined(OS_WIN)
-    DWORD value_length = ::GetEnvironmentVariable(
-        UTF8ToWide(variable_name).c_str(), NULL, 0);
+    DWORD value_length =
+        ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr, 0);
     if (value_length == 0)
       return false;
     if (result) {
-      scoped_ptr<wchar_t[]> value(new wchar_t[value_length]);
+      std::unique_ptr<wchar_t[]> value(new wchar_t[value_length]);
       ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), value.get(),
                                value_length);
       *result = WideToUTF8(value.get());
@@ -80,10 +81,10 @@
 #endif
   }
 
-  bool SetVarImpl(const char* variable_name, const std::string& new_value) {
+  bool SetVarImpl(StringPiece variable_name, const std::string& new_value) {
 #if defined(OS_POSIX)
     // On success, zero is returned.
-    return !setenv(variable_name, new_value.c_str(), 1);
+    return !setenv(variable_name.data(), new_value.c_str(), 1);
 #elif defined(OS_WIN)
     // On success, a nonzero value is returned.
     return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(),
@@ -91,13 +92,13 @@
 #endif
   }
 
-  bool UnSetVarImpl(const char* variable_name) {
+  bool UnSetVarImpl(StringPiece variable_name) {
 #if defined(OS_POSIX)
     // On success, zero is returned.
-    return !unsetenv(variable_name);
+    return !unsetenv(variable_name.data());
 #elif defined(OS_WIN)
     // On success, a nonzero value is returned.
-    return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), NULL);
+    return !!SetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), nullptr);
 #endif
   }
 };
@@ -134,12 +135,12 @@
 Environment::~Environment() {}
 
 // static
-Environment* Environment::Create() {
-  return new EnvironmentImpl();
+std::unique_ptr<Environment> Environment::Create() {
+  return MakeUnique<EnvironmentImpl>();
 }
 
-bool Environment::HasVar(const char* variable_name) {
-  return GetVar(variable_name, NULL);
+bool Environment::HasVar(StringPiece variable_name) {
+  return GetVar(variable_name, nullptr);
 }
 
 #if defined(OS_WIN)
@@ -184,8 +185,8 @@
 
 #elif defined(OS_POSIX)
 
-scoped_ptr<char*[]> AlterEnvironment(const char* const* const env,
-                                     const EnvironmentMap& changes) {
+std::unique_ptr<char* []> AlterEnvironment(const char* const* const env,
+                                           const EnvironmentMap& changes) {
   std::string value_storage;  // Holds concatenated null-terminated strings.
   std::vector<size_t> result_indices;  // Line indices into value_storage.
 
@@ -218,7 +219,7 @@
   size_t pointer_count_required =
       result_indices.size() + 1 +  // Null-terminated array of pointers.
       (value_storage.size() + sizeof(char*) - 1) / sizeof(char*);  // Buffer.
-  scoped_ptr<char*[]> result(new char*[pointer_count_required]);
+  std::unique_ptr<char* []> result(new char*[pointer_count_required]);
 
   // The string storage goes after the array of pointers.
   char* storage_data = reinterpret_cast<char*>(
diff --git a/base/environment.h b/base/environment.h
index c8811e2..3a4ed04 100644
--- a/base/environment.h
+++ b/base/environment.h
@@ -6,11 +6,12 @@
 #define BASE_ENVIRONMENT_H_
 
 #include <map>
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -27,23 +28,22 @@
  public:
   virtual ~Environment();
 
-  // Static factory method that returns the implementation that provide the
-  // appropriate platform-specific instance.
-  static Environment* Create();
+  // Returns the appropriate platform-specific instance.
+  static std::unique_ptr<Environment> Create();
 
   // Gets an environment variable's value and stores it in |result|.
   // Returns false if the key is unset.
-  virtual bool GetVar(const char* variable_name, std::string* result) = 0;
+  virtual bool GetVar(StringPiece variable_name, std::string* result) = 0;
 
-  // Syntactic sugar for GetVar(variable_name, NULL);
-  virtual bool HasVar(const char* variable_name);
+  // Syntactic sugar for GetVar(variable_name, nullptr);
+  virtual bool HasVar(StringPiece variable_name);
 
   // Returns true on success, otherwise returns false.
-  virtual bool SetVar(const char* variable_name,
+  virtual bool SetVar(StringPiece variable_name,
                       const std::string& new_value) = 0;
 
   // Returns true on success, otherwise returns false.
-  virtual bool UnSetVar(const char* variable_name) = 0;
+  virtual bool UnSetVar(StringPiece variable_name) = 0;
 };
 
 
@@ -79,7 +79,7 @@
 // returned array will have appended to it the storage for the array itself so
 // there is only one pointer to manage, but this means that you can't copy the
 // array without keeping the original around.
-BASE_EXPORT scoped_ptr<char*[]> AlterEnvironment(
+BASE_EXPORT std::unique_ptr<char* []> AlterEnvironment(
     const char* const* env,
     const EnvironmentMap& changes);
 
diff --git a/base/environment_unittest.cc b/base/environment_unittest.cc
index 77e9717..ef264cf 100644
--- a/base/environment_unittest.cc
+++ b/base/environment_unittest.cc
@@ -3,7 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/environment.h"
-#include "base/memory/scoped_ptr.h"
+
+#include <memory>
+
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
@@ -14,14 +16,14 @@
 
 TEST_F(EnvironmentTest, GetVar) {
   // Every setup should have non-empty PATH...
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
   std::string env_value;
   EXPECT_TRUE(env->GetVar("PATH", &env_value));
   EXPECT_NE(env_value, "");
 }
 
 TEST_F(EnvironmentTest, GetVarReverse) {
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
   const char kFooUpper[] = "FOO";
   const char kFooLower[] = "foo";
 
@@ -50,12 +52,12 @@
 
 TEST_F(EnvironmentTest, HasVar) {
   // Every setup should have PATH...
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
   EXPECT_TRUE(env->HasVar("PATH"));
 }
 
 TEST_F(EnvironmentTest, SetVar) {
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
 
   const char kFooUpper[] = "FOO";
   const char kFooLower[] = "foo";
@@ -70,7 +72,7 @@
 }
 
 TEST_F(EnvironmentTest, UnSetVar) {
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
 
   const char kFooUpper[] = "FOO";
   const char kFooLower[] = "foo";
@@ -128,7 +130,7 @@
   const char* const empty[] = { NULL };
   const char* const a2[] = { "A=2", NULL };
   EnvironmentMap changes;
-  scoped_ptr<char*[]> e;
+  std::unique_ptr<char* []> e;
 
   e = AlterEnvironment(empty, changes);
   EXPECT_TRUE(e[0] == NULL);
diff --git a/base/feature_list.cc b/base/feature_list.cc
new file mode 100644
index 0000000..435165e
--- /dev/null
+++ b/base/feature_list.cc
@@ -0,0 +1,306 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <stddef.h>
+
+#include <utility>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Pointer to the FeatureList instance singleton that was set via
+// FeatureList::SetInstance(). Does not use base/memory/singleton.h in order to
+// have more control over initialization timing. Leaky.
+FeatureList* g_instance = nullptr;
+
+// Tracks whether the FeatureList instance was initialized via an accessor.
+bool g_initialized_from_accessor = false;
+
+// Some characters are not allowed to appear in feature names or the associated
+// field trial names, as they are used as special characters for command-line
+// serialization. This function checks that the strings are ASCII (since they
+// are used in command-line API functions that require ASCII) and whether there
+// are any reserved characters present, returning true if the string is valid.
+// Only called in DCHECKs.
+bool IsValidFeatureOrFieldTrialName(const std::string& name) {
+  return IsStringASCII(name) && name.find_first_of(",<*") == std::string::npos;
+}
+
+}  // namespace
+
+FeatureList::FeatureList() {}
+
+FeatureList::~FeatureList() {}
+
+void FeatureList::InitializeFromCommandLine(
+    const std::string& enable_features,
+    const std::string& disable_features) {
+  DCHECK(!initialized_);
+
+  // Process disabled features first, so that disabled ones take precedence over
+  // enabled ones (since RegisterOverride() uses insert()).
+  RegisterOverridesFromCommandLine(disable_features, OVERRIDE_DISABLE_FEATURE);
+  RegisterOverridesFromCommandLine(enable_features, OVERRIDE_ENABLE_FEATURE);
+
+  initialized_from_command_line_ = true;
+}
+
+bool FeatureList::IsFeatureOverriddenFromCommandLine(
+    const std::string& feature_name,
+    OverrideState state) const {
+  auto it = overrides_.find(feature_name);
+  return it != overrides_.end() && it->second.overridden_state == state &&
+         !it->second.overridden_by_field_trial;
+}
+
+void FeatureList::AssociateReportingFieldTrial(
+    const std::string& feature_name,
+    OverrideState for_overridden_state,
+    FieldTrial* field_trial) {
+  DCHECK(
+      IsFeatureOverriddenFromCommandLine(feature_name, for_overridden_state));
+
+  // Only one associated field trial is supported per feature. This is generally
+  // enforced server-side.
+  OverrideEntry* entry = &overrides_.find(feature_name)->second;
+  if (entry->field_trial) {
+    NOTREACHED() << "Feature " << feature_name
+                 << " already has trial: " << entry->field_trial->trial_name()
+                 << ", associating trial: " << field_trial->trial_name();
+    return;
+  }
+
+  entry->field_trial = field_trial;
+}
+
+void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
+                                             OverrideState override_state,
+                                             FieldTrial* field_trial) {
+  DCHECK(field_trial);
+  DCHECK(!ContainsKey(overrides_, feature_name) ||
+         !overrides_.find(feature_name)->second.field_trial)
+      << "Feature " << feature_name
+      << " has conflicting field trial overrides: "
+      << overrides_.find(feature_name)->second.field_trial->trial_name()
+      << " / " << field_trial->trial_name();
+
+  RegisterOverride(feature_name, override_state, field_trial);
+}
+
+void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
+                                      std::string* disable_overrides) {
+  DCHECK(initialized_);
+
+  enable_overrides->clear();
+  disable_overrides->clear();
+
+  // Note: Since |overrides_| is a std::map, iteration will be in alphabetical
+  // order. This not guaranteed to users of this function, but is useful for
+  // tests to assume the order.
+  for (const auto& entry : overrides_) {
+    std::string* target_list = nullptr;
+    switch (entry.second.overridden_state) {
+      case OVERRIDE_USE_DEFAULT:
+      case OVERRIDE_ENABLE_FEATURE:
+        target_list = enable_overrides;
+        break;
+      case OVERRIDE_DISABLE_FEATURE:
+        target_list = disable_overrides;
+        break;
+    }
+
+    if (!target_list->empty())
+      target_list->push_back(',');
+    if (entry.second.overridden_state == OVERRIDE_USE_DEFAULT)
+      target_list->push_back('*');
+    target_list->append(entry.first);
+    if (entry.second.field_trial) {
+      target_list->push_back('<');
+      target_list->append(entry.second.field_trial->trial_name());
+    }
+  }
+}
+
+// static
+bool FeatureList::IsEnabled(const Feature& feature) {
+  if (!g_instance) {
+    g_initialized_from_accessor = true;
+    return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+  }
+  return g_instance->IsFeatureEnabled(feature);
+}
+
+// static
+FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
+  return GetInstance()->GetAssociatedFieldTrial(feature);
+}
+
+// static
+std::vector<std::string> FeatureList::SplitFeatureListString(
+    const std::string& input) {
+  return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+}
+
+// static
+bool FeatureList::InitializeInstance(const std::string& enable_features,
+                                     const std::string& disable_features) {
+  // We want to initialize a new instance here to support command-line features
+  // in testing better. For example, we initialize a dummy instance in
+  // base/test/test_suite.cc, and override it in content/browser/
+  // browser_main_loop.cc.
+  // On the other hand, we want to avoid re-initialization from command line.
+  // For example, we initialize an instance in chrome/browser/
+  // chrome_browser_main.cc and do not override it in content/browser/
+  // browser_main_loop.cc.
+  // If the singleton was previously initialized from within an accessor, we
+  // want to prevent callers from reinitializing the singleton and masking the
+  // accessor call(s) which likely returned incorrect information.
+  CHECK(!g_initialized_from_accessor);
+  bool instance_existed_before = false;
+  if (g_instance) {
+    if (g_instance->initialized_from_command_line_)
+      return false;
+
+    delete g_instance;
+    g_instance = nullptr;
+    instance_existed_before = true;
+  }
+
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+  feature_list->InitializeFromCommandLine(enable_features, disable_features);
+  base::FeatureList::SetInstance(std::move(feature_list));
+  return !instance_existed_before;
+}
+
+// static
+FeatureList* FeatureList::GetInstance() {
+  return g_instance;
+}
+
+// static
+void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
+  DCHECK(!g_instance);
+  instance->FinalizeInitialization();
+
+  // Note: Intentional leak of global singleton.
+  g_instance = instance.release();
+}
+
+// static
+void FeatureList::ClearInstanceForTesting() {
+  delete g_instance;
+  g_instance = nullptr;
+  g_initialized_from_accessor = false;
+}
+
+void FeatureList::FinalizeInitialization() {
+  DCHECK(!initialized_);
+  initialized_ = true;
+}
+
+bool FeatureList::IsFeatureEnabled(const Feature& feature) {
+  DCHECK(initialized_);
+  DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
+  DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+  auto it = overrides_.find(feature.name);
+  if (it != overrides_.end()) {
+    const OverrideEntry& entry = it->second;
+
+    // Activate the corresponding field trial, if necessary.
+    if (entry.field_trial)
+      entry.field_trial->group();
+
+    // TODO(asvitkine): Expand this section as more support is added.
+
+    // If marked as OVERRIDE_USE_DEFAULT, simply return the default state below.
+    if (entry.overridden_state != OVERRIDE_USE_DEFAULT)
+      return entry.overridden_state == OVERRIDE_ENABLE_FEATURE;
+  }
+  // Otherwise, return the default state.
+  return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+}
+
+FieldTrial* FeatureList::GetAssociatedFieldTrial(const Feature& feature) {
+  DCHECK(initialized_);
+  DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
+  DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+  auto it = overrides_.find(feature.name);
+  if (it != overrides_.end()) {
+    const OverrideEntry& entry = it->second;
+    return entry.field_trial;
+  }
+
+  return nullptr;
+}
+
+void FeatureList::RegisterOverridesFromCommandLine(
+    const std::string& feature_list,
+    OverrideState overridden_state) {
+  for (const auto& value : SplitFeatureListString(feature_list)) {
+    StringPiece feature_name(value);
+    base::FieldTrial* trial = nullptr;
+
+    // The entry may be of the form FeatureName<FieldTrialName - in which case,
+    // this splits off the field trial name and associates it with the override.
+    std::string::size_type pos = feature_name.find('<');
+    if (pos != std::string::npos) {
+      feature_name.set(value.data(), pos);
+      trial = base::FieldTrialList::Find(value.substr(pos + 1));
+    }
+
+    RegisterOverride(feature_name, overridden_state, trial);
+  }
+}
+
+void FeatureList::RegisterOverride(StringPiece feature_name,
+                                   OverrideState overridden_state,
+                                   FieldTrial* field_trial) {
+  DCHECK(!initialized_);
+  if (field_trial) {
+    DCHECK(IsValidFeatureOrFieldTrialName(field_trial->trial_name()))
+        << field_trial->trial_name();
+  }
+  if (feature_name.starts_with("*")) {
+    feature_name = feature_name.substr(1);
+    overridden_state = OVERRIDE_USE_DEFAULT;
+  }
+
+  // Note: The semantics of insert() is that it does not overwrite the entry if
+  // one already exists for the key. Thus, only the first override for a given
+  // feature name takes effect.
+  overrides_.insert(std::make_pair(
+      feature_name.as_string(), OverrideEntry(overridden_state, field_trial)));
+}
+
+bool FeatureList::CheckFeatureIdentity(const Feature& feature) {
+  AutoLock auto_lock(feature_identity_tracker_lock_);
+
+  auto it = feature_identity_tracker_.find(feature.name);
+  if (it == feature_identity_tracker_.end()) {
+    // If it's not tracked yet, register it.
+    feature_identity_tracker_[feature.name] = &feature;
+    return true;
+  }
+  // Compare address of |feature| to the existing tracked entry.
+  return it->second == &feature;
+}
+
+FeatureList::OverrideEntry::OverrideEntry(OverrideState overridden_state,
+                                          FieldTrial* field_trial)
+    : overridden_state(overridden_state),
+      field_trial(field_trial),
+      overridden_by_field_trial(field_trial != nullptr) {}
+
+}  // namespace base
diff --git a/base/feature_list.h b/base/feature_list.h
new file mode 100644
index 0000000..e9ed00a
--- /dev/null
+++ b/base/feature_list.h
@@ -0,0 +1,260 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FEATURE_LIST_H_
+#define BASE_FEATURE_LIST_H_
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class FieldTrial;
+
+// Specifies whether a given feature is enabled or disabled by default.
+enum FeatureState {
+  FEATURE_DISABLED_BY_DEFAULT,
+  FEATURE_ENABLED_BY_DEFAULT,
+};
+
+// The Feature struct is used to define the default state for a feature. See
+// comment below for more details. There must only ever be one struct instance
+// for a given feature name - generally defined as a constant global variable or
+// file static.
+struct BASE_EXPORT Feature {
+  // The name of the feature. This should be unique to each feature and is used
+  // for enabling/disabling features via command line flags and experiments.
+  const char* const name;
+
+  // The default state (i.e. enabled or disabled) for this feature.
+  const FeatureState default_state;
+};
+
+// The FeatureList class is used to determine whether a given feature is on or
+// off. It provides an authoritative answer, taking into account command-line
+// overrides and experimental control.
+//
+// The basic use case is for any feature that can be toggled (e.g. through
+// command-line or an experiment) to have a defined Feature struct, e.g.:
+//
+//   const base::Feature kMyGreatFeature {
+//     "MyGreatFeature", base::FEATURE_ENABLED_BY_DEFAULT
+//   };
+//
+// Then, client code that wishes to query the state of the feature would check:
+//
+//   if (base::FeatureList::IsEnabled(kMyGreatFeature)) {
+//     // Feature code goes here.
+//   }
+//
+// Behind the scenes, the above call would take into account any command-line
+// flags to enable or disable the feature, any experiments that may control it
+// and finally its default state (in that order of priority), to determine
+// whether the feature is on.
+//
+// Features can be explicitly forced on or off by specifying a list of comma-
+// separated feature names via the following command-line flags:
+//
+//   --enable-features=Feature5,Feature7
+//   --disable-features=Feature1,Feature2,Feature3
+//
+// After initialization (which should be done single-threaded), the FeatureList
+// API is thread safe.
+//
+// Note: This class is a singleton, but does not use base/memory/singleton.h in
+// order to have control over its initialization sequence. Specifically, the
+// intended use is to create an instance of this class and fully initialize it,
+// before setting it as the singleton for a process, via SetInstance().
+class BASE_EXPORT FeatureList {
+ public:
+  FeatureList();
+  ~FeatureList();
+
+  // Initializes feature overrides via command-line flags |enable_features| and
+  // |disable_features|, each of which is a comma-separated list of features to
+  // enable or disable, respectively. If a feature appears on both lists, then
+  // it will be disabled. If a list entry has the format "FeatureName<TrialName"
+  // then this initialization will also associate the feature state override
+  // with the named field trial, if it exists. If a feature name is prefixed
+  // with the '*' character, it will be created with OVERRIDE_USE_DEFAULT -
+  // which is useful for associating with a trial while using the default state.
+  // Must only be invoked during the initialization phase (before
+  // FinalizeInitialization() has been called).
+  void InitializeFromCommandLine(const std::string& enable_features,
+                                 const std::string& disable_features);
+
+  // Specifies whether a feature override enables or disables the feature.
+  enum OverrideState {
+    OVERRIDE_USE_DEFAULT,
+    OVERRIDE_DISABLE_FEATURE,
+    OVERRIDE_ENABLE_FEATURE,
+  };
+
+  // Returns true if the state of |feature_name| has been overridden via
+  // |InitializeFromCommandLine()|.
+  bool IsFeatureOverriddenFromCommandLine(const std::string& feature_name,
+                                          OverrideState state) const;
+
+  // Associates a field trial for reporting purposes corresponding to the
+  // command-line setting the feature state to |for_overridden_state|. The trial
+  // will be activated when the state of the feature is first queried. This
+  // should be called during registration, after InitializeFromCommandLine() has
+  // been called but before the instance is registered via SetInstance().
+  void AssociateReportingFieldTrial(const std::string& feature_name,
+                                    OverrideState for_overridden_state,
+                                    FieldTrial* field_trial);
+
+  // Registers a field trial to override the enabled state of the specified
+  // feature to |override_state|. Command-line overrides still take precedence
+  // over field trials, so this will have no effect if the feature is being
+  // overridden from the command-line. The associated field trial will be
+  // activated when the feature state for this feature is queried. This should
+  // be called during registration, after InitializeFromCommandLine() has been
+  // called but before the instance is registered via SetInstance().
+  void RegisterFieldTrialOverride(const std::string& feature_name,
+                                  OverrideState override_state,
+                                  FieldTrial* field_trial);
+
+  // Returns comma-separated lists of feature names (in the same format that is
+  // accepted by InitializeFromCommandLine()) corresponding to features that
+  // have been overridden - either through command-line or via FieldTrials. For
+  // those features that have an associated FieldTrial, the output entry will be
+  // of the format "FeatureName<TrialName", where "TrialName" is the name of the
+  // FieldTrial. Features that have overrides with OVERRIDE_USE_DEFAULT will be
+  // added to |enable_overrides| with a '*' character prefix. Must be called
+  // only after the instance has been initialized and registered.
+  void GetFeatureOverrides(std::string* enable_overrides,
+                           std::string* disable_overrides);
+
+  // Returns whether the given |feature| is enabled. Must only be called after
+  // the singleton instance has been registered via SetInstance(). Additionally,
+  // a feature with a given name must only have a single corresponding Feature
+  // struct, which is checked in builds with DCHECKs enabled.
+  static bool IsEnabled(const Feature& feature);
+
+  // Returns the field trial associated with the given |feature|. Must only be
+  // called after the singleton instance has been registered via SetInstance().
+  static FieldTrial* GetFieldTrial(const Feature& feature);
+
+  // Splits a comma-separated string containing feature names into a vector.
+  static std::vector<std::string> SplitFeatureListString(
+      const std::string& input);
+
+  // Initializes and sets an instance of FeatureList with feature overrides via
+  // command-line flags |enable_features| and |disable_features| if one has not
+  // already been set from command-line flags. Returns true if an instance did
+  // not previously exist. See InitializeFromCommandLine() for more details
+  // about |enable_features| and |disable_features| parameters.
+  static bool InitializeInstance(const std::string& enable_features,
+                                 const std::string& disable_features);
+
+  // Returns the singleton instance of FeatureList. Will return null until an
+  // instance is registered via SetInstance().
+  static FeatureList* GetInstance();
+
+  // Registers the given |instance| to be the singleton feature list for this
+  // process. This should only be called once and |instance| must not be null.
+  static void SetInstance(std::unique_ptr<FeatureList> instance);
+
+  // Clears the previously-registered singleton instance for tests.
+  static void ClearInstanceForTesting();
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
+
+  struct OverrideEntry {
+    // The overridden enable (on/off) state of the feature.
+    const OverrideState overridden_state;
+
+    // An optional associated field trial, which will be activated when the
+    // state of the feature is queried for the first time. Weak pointer to the
+    // FieldTrial object that is owned by the FieldTrialList singleton.
+    base::FieldTrial* field_trial;
+
+    // Specifies whether the feature's state is overridden by |field_trial|.
+    // If it's not, and |field_trial| is not null, it means it is simply an
+    // associated field trial for reporting purposes (and |overridden_state|
+    // came from the command-line).
+    const bool overridden_by_field_trial;
+
+    // TODO(asvitkine): Expand this as more support is added.
+
+    // Constructs an OverrideEntry for the given |overridden_state|. If
+    // |field_trial| is not null, it implies that |overridden_state| comes from
+    // the trial, so |overridden_by_field_trial| will be set to true.
+    OverrideEntry(OverrideState overridden_state, FieldTrial* field_trial);
+  };
+
+  // Finalizes the initialization state of the FeatureList, so that no further
+  // overrides can be registered. This is called by SetInstance() on the
+  // singleton feature list that is being registered.
+  void FinalizeInitialization();
+
+  // Returns whether the given |feature| is enabled. This is invoked by the
+  // public FeatureList::IsEnabled() static function on the global singleton.
+  // Requires the FeatureList to have already been fully initialized.
+  bool IsFeatureEnabled(const Feature& feature);
+
+  // Returns the field trial associated with the given |feature|. This is
+  // invoked by the public FeatureList::GetFieldTrial() static function on the
+  // global singleton. Requires the FeatureList to have already been fully
+  // initialized.
+  base::FieldTrial* GetAssociatedFieldTrial(const Feature& feature);
+
+  // For each feature name in comma-separated list of strings |feature_list|,
+  // registers an override with the specified |overridden_state|. Also, will
+  // associate an optional named field trial if the entry is of the format
+  // "FeatureName<TrialName".
+  void RegisterOverridesFromCommandLine(const std::string& feature_list,
+                                        OverrideState overridden_state);
+
+  // Registers an override for feature |feature_name|. The override specifies
+  // whether the feature should be on or off (via |overridden_state|), which
+  // will take precedence over the feature's default state. If |field_trial| is
+  // not null, registers the specified field trial object to be associated with
+  // the feature, which will activate the field trial when the feature state is
+  // queried. If an override is already registered for the given feature, it
+  // will not be changed.
+  void RegisterOverride(StringPiece feature_name,
+                        OverrideState overridden_state,
+                        FieldTrial* field_trial);
+
+  // Verifies that there's only a single definition of a Feature struct for a
+  // given feature name. Keeps track of the first seen Feature struct for each
+  // feature. Returns false when called on a Feature struct with a different
+  // address than the first one it saw for that feature name. Used only from
+  // DCHECKs and tests.
+  bool CheckFeatureIdentity(const Feature& feature);
+
+  // Map from feature name to an OverrideEntry struct for the feature, if it
+  // exists.
+  std::map<std::string, OverrideEntry> overrides_;
+
+  // Locked map that keeps track of seen features, to ensure a single feature is
+  // only defined once. This verification is only done in builds with DCHECKs
+  // enabled.
+  Lock feature_identity_tracker_lock_;
+  std::map<std::string, const Feature*> feature_identity_tracker_;
+
+  // Whether this object has been fully initialized. This gets set to true as a
+  // result of FinalizeInitialization().
+  bool initialized_ = false;
+
+  // Whether this object has been initialized from command line.
+  bool initialized_from_command_line_ = false;
+
+  DISALLOW_COPY_AND_ASSIGN(FeatureList);
+};
+
+}  // namespace base
+
+#endif  // BASE_FEATURE_LIST_H_
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
new file mode 100644
index 0000000..9d1dcb7
--- /dev/null
+++ b/base/feature_list_unittest.cc
@@ -0,0 +1,471 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+#include <utility>
+
+#include "base/format_macros.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const char kFeatureOnByDefaultName[] = "OnByDefault";
+struct Feature kFeatureOnByDefault {
+  kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+};
+
+const char kFeatureOffByDefaultName[] = "OffByDefault";
+struct Feature kFeatureOffByDefault {
+  kFeatureOffByDefaultName, FEATURE_DISABLED_BY_DEFAULT
+};
+
+std::string SortFeatureListString(const std::string& feature_list) {
+  std::vector<std::string> features =
+      FeatureList::SplitFeatureListString(feature_list);
+  std::sort(features.begin(), features.end());
+  return JoinString(features, ",");
+}
+
+}  // namespace
+
+class FeatureListTest : public testing::Test {
+ public:
+  FeatureListTest() : feature_list_(nullptr) {
+    RegisterFeatureListInstance(WrapUnique(new FeatureList));
+  }
+  ~FeatureListTest() override { ClearFeatureListInstance(); }
+
+  void RegisterFeatureListInstance(std::unique_ptr<FeatureList> feature_list) {
+    FeatureList::ClearInstanceForTesting();
+    feature_list_ = feature_list.get();
+    FeatureList::SetInstance(std::move(feature_list));
+  }
+  void ClearFeatureListInstance() {
+    FeatureList::ClearInstanceForTesting();
+    feature_list_ = nullptr;
+  }
+
+  FeatureList* feature_list() { return feature_list_; }
+
+ private:
+  // Weak. Owned by the FeatureList::SetInstance().
+  FeatureList* feature_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(FeatureListTest);
+};
+
+TEST_F(FeatureListTest, DefaultStates) {
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine) {
+  struct {
+    const char* enable_features;
+    const char* disable_features;
+    bool expected_feature_on_state;
+    bool expected_feature_off_state;
+  } test_cases[] = {
+      {"", "", true, false},
+      {"OffByDefault", "", true, true},
+      {"OffByDefault", "OnByDefault", false, true},
+      {"OnByDefault,OffByDefault", "", true, true},
+      {"", "OnByDefault,OffByDefault", false, false},
+      // In the case an entry is both, disable takes precedence.
+      {"OnByDefault", "OnByDefault,OffByDefault", false, false},
+  };
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+                                    test_case.enable_features,
+                                    test_case.disable_features));
+
+    ClearFeatureListInstance();
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(test_case.enable_features,
+                                            test_case.disable_features);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    EXPECT_EQ(test_case.expected_feature_on_state,
+              FeatureList::IsEnabled(kFeatureOnByDefault))
+        << i;
+    EXPECT_EQ(test_case.expected_feature_off_state,
+              FeatureList::IsEnabled(kFeatureOffByDefault))
+        << i;
+  }
+}
+
+TEST_F(FeatureListTest, CheckFeatureIdentity) {
+  // Tests that CheckFeatureIdentity() correctly detects when two different
+  // structs with the same feature name are passed to it.
+
+  // Call it twice for each feature at the top of the file, since the first call
+  // makes it remember the entry and the second call will verify it.
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+
+  // Now, call it with a distinct struct for |kFeatureOnByDefaultName|, which
+  // should return false.
+  struct Feature kFeatureOnByDefault2 {
+    kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+  };
+  EXPECT_FALSE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault2));
+}
+
+TEST_F(FeatureListTest, FieldTrialOverrides) {
+  struct {
+    FeatureList::OverrideState trial1_state;
+    FeatureList::OverrideState trial2_state;
+  } test_cases[] = {
+      {FeatureList::OVERRIDE_DISABLE_FEATURE,
+       FeatureList::OVERRIDE_DISABLE_FEATURE},
+      {FeatureList::OVERRIDE_DISABLE_FEATURE,
+       FeatureList::OVERRIDE_ENABLE_FEATURE},
+      {FeatureList::OVERRIDE_ENABLE_FEATURE,
+       FeatureList::OVERRIDE_DISABLE_FEATURE},
+      {FeatureList::OVERRIDE_ENABLE_FEATURE,
+       FeatureList::OVERRIDE_ENABLE_FEATURE},
+  };
+
+  FieldTrial::ActiveGroup active_group;
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]", i));
+
+    ClearFeatureListInstance();
+
+    FieldTrialList field_trial_list(nullptr);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+    FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+    FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+    feature_list->RegisterFieldTrialOverride(kFeatureOnByDefaultName,
+                                             test_case.trial1_state, trial1);
+    feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+                                             test_case.trial2_state, trial2);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    // Initially, neither trial should be active.
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+    const bool expected_enabled_1 =
+        (test_case.trial1_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+    EXPECT_EQ(expected_enabled_1, FeatureList::IsEnabled(kFeatureOnByDefault));
+    // The above should have activated |trial1|.
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+    const bool expected_enabled_2 =
+        (test_case.trial2_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+    EXPECT_EQ(expected_enabled_2, FeatureList::IsEnabled(kFeatureOffByDefault));
+    // The above should have activated |trial2|.
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+  }
+}
+
+TEST_F(FeatureListTest, FieldTrialAssociateUseDefault) {
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+  FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial1);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial2);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  // Initially, neither trial should be active.
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+  // Check the feature enabled state is its default.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  // The above should have activated |trial1|.
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+  // Check the feature enabled state is its default.
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  // The above should have activated |trial2|.
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+}
+
+TEST_F(FeatureListTest, CommandLineTakesPrecedenceOverFieldTrial) {
+  ClearFeatureListInstance();
+
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  // The feature is explicitly enabled on the command-line.
+  feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+  // But the FieldTrial would set the feature to disabled.
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample2", "A");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, trial);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+  // Command-line should take precedence.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  // Since the feature is on due to the command-line, and not as a result of the
+  // field trial, the field trial should not be activated (since the Associate*
+  // API wasn't used.)
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+}
+
+TEST_F(FeatureListTest, IsFeatureOverriddenFromCommandLine) {
+  ClearFeatureListInstance();
+
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  // No features are overridden from the command line yet.
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Now, enable |kFeatureOffByDefaultName| via the command-line.
+  feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+  // It should now be overridden for the enabled group.
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Register a field trial to associate with the feature and ensure that the
+  // results are still the same.
+  feature_list->AssociateReportingFieldTrial(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+      FieldTrialList::CreateFieldTrial("Trial1", "A"));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Now, register a field trial to override |kFeatureOnByDefaultName| state
+  // and check that the function still returns false for that feature.
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+      FieldTrialList::CreateFieldTrial("Trial2", "A"));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  // Check the expected feature states for good measure.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+}
+
+TEST_F(FeatureListTest, AssociateReportingFieldTrial) {
+  struct {
+    const char* enable_features;
+    const char* disable_features;
+    bool expected_enable_trial_created;
+    bool expected_disable_trial_created;
+  } test_cases[] = {
+      // If no enable/disable flags are specified, no trials should be created.
+      {"", "", false, false},
+      // Enabling the feature should result in the enable trial created.
+      {kFeatureOffByDefaultName, "", true, false},
+      // Disabling the feature should result in the disable trial created.
+      {"", kFeatureOffByDefaultName, false, true},
+  };
+
+  const char kTrialName[] = "ForcingTrial";
+  const char kForcedOnGroupName[] = "ForcedOn";
+  const char kForcedOffGroupName[] = "ForcedOff";
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+                                    test_case.enable_features,
+                                    test_case.disable_features));
+
+    ClearFeatureListInstance();
+
+    FieldTrialList field_trial_list(nullptr);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(test_case.enable_features,
+                                            test_case.disable_features);
+
+    FieldTrial* enable_trial = nullptr;
+    if (feature_list->IsFeatureOverriddenFromCommandLine(
+            kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE)) {
+      enable_trial = base::FieldTrialList::CreateFieldTrial(kTrialName,
+                                                            kForcedOnGroupName);
+      feature_list->AssociateReportingFieldTrial(
+          kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+          enable_trial);
+    }
+    FieldTrial* disable_trial = nullptr;
+    if (feature_list->IsFeatureOverriddenFromCommandLine(
+            kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE)) {
+      disable_trial = base::FieldTrialList::CreateFieldTrial(
+          kTrialName, kForcedOffGroupName);
+      feature_list->AssociateReportingFieldTrial(
+          kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+          disable_trial);
+    }
+    EXPECT_EQ(test_case.expected_enable_trial_created, enable_trial != nullptr);
+    EXPECT_EQ(test_case.expected_disable_trial_created,
+              disable_trial != nullptr);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+    if (disable_trial) {
+      EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+      EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+      EXPECT_EQ(kForcedOffGroupName, disable_trial->group_name());
+    } else if (enable_trial) {
+      EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+      EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+      EXPECT_EQ(kForcedOnGroupName, enable_trial->group_name());
+    }
+  }
+}
+
+TEST_F(FeatureListTest, GetFeatureOverrides) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,X", "D");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+                                           FeatureList::OVERRIDE_ENABLE_FEATURE,
+                                           trial);
+
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  std::string enable_features;
+  std::string disable_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enable_features,
+                                                  &disable_features);
+  EXPECT_EQ("A,OffByDefault<Trial,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+}
+
+TEST_F(FeatureListTest, GetFeatureOverrides_UseDefault) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,X", "D");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial);
+
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  std::string enable_features;
+  std::string disable_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enable_features,
+                                                  &disable_features);
+  EXPECT_EQ("*OffByDefault<Trial,A,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+}
+
+TEST_F(FeatureListTest, GetFieldTrial) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_EQ(trial, FeatureList::GetFieldTrial(kFeatureOnByDefault));
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine_WithFieldTrials) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("Trial", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,OffByDefault<Trial,X", "D");
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("Trial"));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("Trial"));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine_UseDefault) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("T1", "Group");
+  FieldTrialList::CreateFieldTrial("T2", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine(
+      "A,*OffByDefault<T1,*OnByDefault<T2,X", "D");
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("T1"));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("T1"));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("T2"));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("T2"));
+}
+
+TEST_F(FeatureListTest, InitializeInstance) {
+  ClearFeatureListInstance();
+
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+  FeatureList::SetInstance(std::move(feature_list));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+
+  // Initialize from command line if we haven't yet.
+  FeatureList::InitializeInstance("", kFeatureOnByDefaultName);
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+
+  // Do not initialize from command line if we have already.
+  FeatureList::InitializeInstance(kFeatureOffByDefaultName, "");
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, UninitializedInstance_IsEnabledReturnsFalse) {
+  ClearFeatureListInstance();
+  // This test case simulates the calling pattern found in code which does not
+  // explicitly initialize the feature list.
+  // All IsEnabled() calls should return the default value in this scenario.
+  EXPECT_EQ(nullptr, FeatureList::GetInstance());
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_EQ(nullptr, FeatureList::GetInstance());
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+}  // namespace base
diff --git a/base/file_version_info.h b/base/file_version_info.h
index 8c1bf92..3b9457c 100644
--- a/base/file_version_info.h
+++ b/base/file_version_info.h
@@ -5,18 +5,15 @@
 #ifndef BASE_FILE_VERSION_INFO_H_
 #define BASE_FILE_VERSION_INFO_H_
 
+#include <string>
+
 #include "build/build_config.h"
+#include "base/base_export.h"
+#include "base/strings/string16.h"
 
 #if defined(OS_WIN)
 #include <windows.h>
-// http://blogs.msdn.com/oldnewthing/archive/2004/10/25/247180.aspx
-extern "C" IMAGE_DOS_HEADER __ImageBase;
-#endif  // OS_WIN
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/strings/string16.h"
+#endif
 
 namespace base {
 class FilePath;
@@ -32,17 +29,6 @@
 // version returns values from the Info.plist as appropriate. TODO(avi): make
 // this a less-obvious Windows-ism.
 
-#if defined(OS_WIN)
-// Creates a FileVersionInfo for the current module. Returns NULL in case of
-// error. The returned object should be deleted when you are done with it. This
-// is done as a macro to force inlining of __ImageBase. It used to be inside of
-// a method labeled with __forceinline, but inlining through __forceinline
-// stopped working for Debug builds in VS2013 (http://crbug.com/516359).
-#define CREATE_FILE_VERSION_INFO_FOR_CURRENT_MODULE() \
-    FileVersionInfo::CreateFileVersionInfoForModule( \
-        reinterpret_cast<HMODULE>(&__ImageBase))
-#endif
-
 class BASE_EXPORT FileVersionInfo {
  public:
   virtual ~FileVersionInfo() {}
@@ -57,8 +43,6 @@
 #if defined(OS_WIN)
   // Creates a FileVersionInfo for the specified module. Returns NULL in case
   // of error. The returned object should be deleted when you are done with it.
-  // See CREATE_FILE_VERSION_INFO_FOR_CURRENT_MODULE() helper above for a
-  // CreateFileVersionInfoForCurrentModule() alternative for Windows.
   static FileVersionInfo* CreateFileVersionInfoForModule(HMODULE module);
 #else
   // Creates a FileVersionInfo for the current module. Returns NULL in case
diff --git a/base/file_version_info_unittest.cc b/base/file_version_info_unittest.cc
index 66e298d..67edc77 100644
--- a/base/file_version_info_unittest.cc
+++ b/base/file_version_info_unittest.cc
@@ -2,12 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/file_version_info.h"
+
 #include <stddef.h>
 
-#include "base/file_version_info.h"
+#include <memory>
+
 #include "base/files/file_path.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -60,7 +63,7 @@
   FilePath dll_path = GetTestDataPath();
   dll_path = dll_path.Append(kDLLName);
 
-  scoped_ptr<FileVersionInfo> version_info(
+  std::unique_ptr<FileVersionInfo> version_info(
       FileVersionInfo::CreateFileVersionInfo(dll_path));
 
   int j = 0;
@@ -101,7 +104,7 @@
     FilePath dll_path = GetTestDataPath();
     dll_path = dll_path.Append(kDLLNames[i]);
 
-    scoped_ptr<FileVersionInfo> version_info(
+    std::unique_ptr<FileVersionInfo> version_info(
         FileVersionInfo::CreateFileVersionInfo(dll_path));
 
     EXPECT_EQ(kExpected[i], version_info->is_official_build());
@@ -114,7 +117,7 @@
   FilePath dll_path = GetTestDataPath();
   dll_path = dll_path.AppendASCII("FileVersionInfoTest1.dll");
 
-  scoped_ptr<FileVersionInfo> version_info(
+  std::unique_ptr<FileVersionInfo> version_info(
       FileVersionInfo::CreateFileVersionInfo(dll_path));
 
   // Test few existing properties.
diff --git a/base/files/file.h b/base/files/file.h
index 7ab5ca5..ae2bd1b 100644
--- a/base/files/file.h
+++ b/base/files/file.h
@@ -13,7 +13,7 @@
 #include "base/files/file_path.h"
 #include "base/files/file_tracing.h"
 #include "base/files/scoped_file.h"
-#include "base/move.h"
+#include "base/macros.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
 
@@ -29,10 +29,13 @@
 namespace base {
 
 #if defined(OS_WIN)
-typedef HANDLE PlatformFile;
-#elif defined(OS_POSIX)
-typedef int PlatformFile;
+using PlatformFile = HANDLE;
 
+const PlatformFile kInvalidPlatformFile = INVALID_HANDLE_VALUE;
+#elif defined(OS_POSIX)
+using PlatformFile = int;
+
+const PlatformFile kInvalidPlatformFile = -1;
 #if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
 typedef struct stat stat_wrapper_t;
 #else
@@ -51,8 +54,6 @@
 // to the OS is not considered const, even if there is no apparent change to
 // member variables.
 class BASE_EXPORT File {
-  MOVE_ONLY_TYPE_FOR_CPP_03(File)
-
  public:
   // FLAG_(OPEN|CREATE).* are mutually exclusive. You should specify exactly one
   // of the five (possibly combining with other flags) when opening or creating
@@ -331,6 +332,8 @@
   Error error_details_;
   bool created_;
   bool async_;
+
+  DISALLOW_COPY_AND_ASSIGN(File);
 };
 
 }  // namespace base
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 2677258..29f12a8 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -44,7 +44,7 @@
 // otherwise returns npos.  This can only be true on Windows, when a pathname
 // begins with a letter followed by a colon.  On other platforms, this always
 // returns npos.
-StringPieceType::size_type FindDriveLetter(StringPieceType /* path */) {
+StringPieceType::size_type FindDriveLetter(StringPieceType path) {
 #if defined(FILE_PATH_USES_DRIVE_LETTERS)
   // This is dependent on an ASCII-based character set, but that's a
   // reasonable assumption.  iswalpha can be too inclusive here.
@@ -53,6 +53,8 @@
        (path[0] >= L'a' && path[0] <= L'z'))) {
     return 1;
   }
+#else
+  (void)path;  // Avoid an unused warning.
 #endif  // FILE_PATH_USES_DRIVE_LETTERS
   return StringType::npos;
 }
@@ -502,10 +504,10 @@
   // Don't append a separator if the path is empty (indicating the current
   // directory) or if the path component is empty (indicating nothing to
   // append).
-  if (appended.length() > 0 && new_path.path_.length() > 0) {
+  if (!appended.empty() && !new_path.path_.empty()) {
     // Don't append a separator if the path still ends with a trailing
     // separator after stripping (indicating the root directory).
-    if (!IsSeparator(new_path.path_[new_path.path_.length() - 1])) {
+    if (!IsSeparator(new_path.path_.back())) {
       // Don't append a separator if the path is just a drive letter.
       if (FindDriveLetter(new_path.path_) + 1 != new_path.path_.length()) {
         new_path.path_.append(1, kSeparators[0]);
@@ -537,7 +539,7 @@
 bool FilePath::EndsWithSeparator() const {
   if (empty())
     return false;
-  return IsSeparator(path_[path_.size() - 1]);
+  return IsSeparator(path_.back());
 }
 
 FilePath FilePath::AsEndingWithSeparator() const {
@@ -610,7 +612,7 @@
 }
 
 // static
-FilePath FilePath::FromUTF8Unsafe(const std::string& utf8) {
+FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
 #if defined(SYSTEM_NATIVE_UTF8)
   return FilePath(utf8);
 #else
@@ -619,11 +621,11 @@
 }
 
 // static
-FilePath FilePath::FromUTF16Unsafe(const string16& utf16) {
+FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
 #if defined(SYSTEM_NATIVE_UTF8)
   return FilePath(UTF16ToUTF8(utf16));
 #else
-  return FilePath(SysWideToNativeMB(UTF16ToWide(utf16)));
+  return FilePath(SysWideToNativeMB(UTF16ToWide(utf16.as_string())));
 #endif
 }
 
@@ -647,16 +649,24 @@
 }
 
 // static
-FilePath FilePath::FromUTF8Unsafe(const std::string& utf8) {
+FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
   return FilePath(UTF8ToWide(utf8));
 }
 
 // static
-FilePath FilePath::FromUTF16Unsafe(const string16& utf16) {
+FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
   return FilePath(utf16);
 }
 #endif
 
+void FilePath::GetSizeForPickle(PickleSizer* sizer) const {
+#if defined(OS_WIN)
+  sizer->AddString16(path_);
+#else
+  sizer->AddString(path_);
+#endif
+}
+
 void FilePath::WriteToPickle(Pickle* pickle) const {
 #if defined(OS_WIN)
   pickle->WriteString16(path_);
@@ -685,6 +695,10 @@
 
 int FilePath::CompareIgnoreCase(StringPieceType string1,
                                 StringPieceType string2) {
+  static decltype(::CharUpperW)* const char_upper_api =
+      reinterpret_cast<decltype(::CharUpperW)*>(
+          ::GetProcAddress(::GetModuleHandle(L"user32.dll"), "CharUpperW"));
+  CHECK(char_upper_api);
   // Perform character-wise upper case comparison rather than using the
   // fully Unicode-aware CompareString(). For details see:
   // http://blogs.msdn.com/michkap/archive/2005/10/17/481600.aspx
@@ -694,9 +708,9 @@
   StringPieceType::const_iterator string2end = string2.end();
   for ( ; i1 != string1end && i2 != string2end; ++i1, ++i2) {
     wchar_t c1 =
-        (wchar_t)LOWORD(::CharUpperW((LPWSTR)(DWORD_PTR)MAKELONG(*i1, 0)));
+        (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i1, 0)));
     wchar_t c2 =
-        (wchar_t)LOWORD(::CharUpperW((LPWSTR)(DWORD_PTR)MAKELONG(*i2, 0)));
+        (wchar_t)LOWORD(char_upper_api((LPWSTR)(DWORD_PTR)MAKELONG(*i2, 0)));
     if (c1 < c2)
       return -1;
     if (c1 > c2)
@@ -1184,6 +1198,7 @@
 }
 
 StringType FilePath::GetHFSDecomposedForm(StringPieceType string) {
+  StringType result;
   ScopedCFTypeRef<CFStringRef> cfstring(
       CFStringCreateWithBytesNoCopy(
           NULL,
@@ -1192,26 +1207,27 @@
           kCFStringEncodingUTF8,
           false,
           kCFAllocatorNull));
-  // Query the maximum length needed to store the result. In most cases this
-  // will overestimate the required space. The return value also already
-  // includes the space needed for a terminating 0.
-  CFIndex length = CFStringGetMaximumSizeOfFileSystemRepresentation(cfstring);
-  DCHECK_GT(length, 0);  // should be at least 1 for the 0-terminator.
-  // Reserve enough space for CFStringGetFileSystemRepresentation to write into.
-  // Also set the length to the maximum so that we can shrink it later.
-  // (Increasing rather than decreasing it would clobber the string contents!)
-  StringType result;
-  result.reserve(length);
-  result.resize(length - 1);
-  Boolean success = CFStringGetFileSystemRepresentation(cfstring,
-                                                        &result[0],
-                                                        length);
-  if (success) {
-    // Reduce result.length() to actual string length.
-    result.resize(strlen(result.c_str()));
-  } else {
-    // An error occurred -> clear result.
-    result.clear();
+  if (cfstring) {
+    // Query the maximum length needed to store the result. In most cases this
+    // will overestimate the required space. The return value also already
+    // includes the space needed for a terminating 0.
+    CFIndex length = CFStringGetMaximumSizeOfFileSystemRepresentation(cfstring);
+    DCHECK_GT(length, 0);  // should be at least 1 for the 0-terminator.
+    // Reserve enough space for CFStringGetFileSystemRepresentation to write
+    // into. Also set the length to the maximum so that we can shrink it later.
+    // (Increasing rather than decreasing it would clobber the string contents!)
+    result.reserve(length);
+    result.resize(length - 1);
+    Boolean success = CFStringGetFileSystemRepresentation(cfstring,
+                                                          &result[0],
+                                                          length);
+    if (success) {
+      // Reduce result.length() to actual string length.
+      result.resize(strlen(result.c_str()));
+    } else {
+      // An error occurred -> clear result.
+      result.clear();
+    }
   }
   return result;
 }
@@ -1299,7 +1315,7 @@
   return NormalizePathSeparatorsTo(kSeparators[0]);
 }
 
-FilePath FilePath::NormalizePathSeparatorsTo(CharType /* separator */) const {
+FilePath FilePath::NormalizePathSeparatorsTo(CharType separator) const {
 #if defined(FILE_PATH_USES_WIN_SEPARATORS)
   DCHECK_NE(kSeparators + kSeparatorsLength,
             std::find(kSeparators, kSeparators + kSeparatorsLength, separator));
@@ -1309,6 +1325,7 @@
   }
   return FilePath(copy);
 #else
+  (void)separator;  // Avoid an unused warning.
   return *this;
 #endif
 }
diff --git a/base/files/file_path.h b/base/files/file_path.h
index 89e9cbf..3234df7 100644
--- a/base/files/file_path.h
+++ b/base/files/file_path.h
@@ -138,6 +138,7 @@
 
 class Pickle;
 class PickleIterator;
+class PickleSizer;
 
 // An abstraction to isolate users from the differences between native
 // pathnames on different platforms.
@@ -372,11 +373,12 @@
   // internally calls SysWideToNativeMB() on POSIX systems other than Mac
   // and Chrome OS, to mitigate the encoding issue. See the comment at
   // AsUTF8Unsafe() for details.
-  static FilePath FromUTF8Unsafe(const std::string& utf8);
+  static FilePath FromUTF8Unsafe(StringPiece utf8);
 
   // Similar to FromUTF8Unsafe, but accepts UTF-16 instead.
-  static FilePath FromUTF16Unsafe(const string16& utf16);
+  static FilePath FromUTF16Unsafe(StringPiece16 utf16);
 
+  void GetSizeForPickle(PickleSizer* sizer) const;
   void WriteToPickle(Pickle* pickle) const;
   bool ReadFromPickle(PickleIterator* iter);
 
diff --git a/base/files/file_path_unittest.cc b/base/files/file_path_unittest.cc
index b1d93a8..d8c5969 100644
--- a/base/files/file_path_unittest.cc
+++ b/base/files/file_path_unittest.cc
@@ -1296,4 +1296,23 @@
   EXPECT_EQ("foo", ss.str());
 }
 
+// Test GetHFSDecomposedForm should return empty result for invalid UTF-8
+// strings.
+#if defined(OS_MACOSX)
+TEST_F(FilePathTest, GetHFSDecomposedFormWithInvalidInput) {
+  const FilePath::CharType* cases[] = {
+    FPL("\xc3\x28"),
+    FPL("\xe2\x82\x28"),
+    FPL("\xe2\x28\xa1"),
+    FPL("\xf0\x28\x8c\xbc"),
+    FPL("\xf0\x28\x8c\x28"),
+  };
+  for (const auto& invalid_input : cases) {
+    FilePath::StringType observed = FilePath::GetHFSDecomposedForm(
+        invalid_input);
+    EXPECT_TRUE(observed.empty());
+  }
+}
+#endif
+
 }  // namespace base
diff --git a/base/files/file_path_watcher.cc b/base/files/file_path_watcher.cc
index 955e6a2..a4624ab 100644
--- a/base/files/file_path_watcher.cc
+++ b/base/files/file_path_watcher.cc
@@ -11,10 +11,6 @@
 #include "base/message_loop/message_loop.h"
 #include "build/build_config.h"
 
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-#include "base/mac/mac_util.h"
-#endif
-
 namespace base {
 
 FilePathWatcher::~FilePathWatcher() {
@@ -29,13 +25,11 @@
 
 // static
 bool FilePathWatcher::RecursiveWatchAvailable() {
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // FSEvents isn't available on iOS and is broken on OSX 10.6 and earlier.
-  // See http://crbug.com/54822#c31
-  return mac::IsOSLionOrLater();
-#elif defined(OS_WIN) || defined(OS_LINUX) || defined(OS_ANDROID)
+#if (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_WIN) || \
+    defined(OS_LINUX) || defined(OS_ANDROID)
   return true;
 #else
+  // FSEvents isn't available on iOS.
   return false;
 #endif
 }
diff --git a/base/files/file_path_watcher_fsevents.cc b/base/files/file_path_watcher_fsevents.cc
index 78637aa..e9d2508 100644
--- a/base/files/file_path_watcher_fsevents.cc
+++ b/base/files/file_path_watcher_fsevents.cc
@@ -4,17 +4,19 @@
 
 #include "base/files/file_path_watcher_fsevents.h"
 
+#include <dispatch/dispatch.h>
+
 #include <list>
 
 #include "base/bind.h"
 #include "base/files/file_util.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/mac/libdispatch_task_runner.h"
 #include "base/mac/scoped_cftyperef.h"
 #include "base/macros.h"
 #include "base/message_loop/message_loop.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
@@ -23,19 +25,6 @@
 // The latency parameter passed to FSEventsStreamCreate().
 const CFAbsoluteTime kEventLatencySeconds = 0.3;
 
-class FSEventsTaskRunner : public mac::LibDispatchTaskRunner {
- public:
-   FSEventsTaskRunner()
-       : mac::LibDispatchTaskRunner("org.chromium.FilePathWatcherFSEvents") {
-   }
-
- protected:
-  ~FSEventsTaskRunner() override {}
-};
-
-static LazyInstance<FSEventsTaskRunner>::Leaky g_task_runner =
-    LAZY_INSTANCE_INITIALIZER;
-
 // Resolve any symlinks in the path.
 FilePath ResolvePath(const FilePath& path) {
   const unsigned kMaxLinksToResolve = 255;
@@ -79,7 +68,12 @@
 
 }  // namespace
 
-FilePathWatcherFSEvents::FilePathWatcherFSEvents() : fsevent_stream_(NULL) {
+FilePathWatcherFSEvents::FilePathWatcherFSEvents()
+    : queue_(dispatch_queue_create(
+          base::StringPrintf(
+              "org.chromium.base.FilePathWatcher.%p", this).c_str(),
+          DISPATCH_QUEUE_SERIAL)),
+      fsevent_stream_(nullptr) {
 }
 
 bool FilePathWatcherFSEvents::Watch(const FilePath& path,
@@ -98,9 +92,14 @@
   callback_ = callback;
 
   FSEventStreamEventId start_event = FSEventsGetCurrentEventId();
-  g_task_runner.Get().PostTask(
-      FROM_HERE, Bind(&FilePathWatcherFSEvents::StartEventStream, this,
-                      start_event, path));
+  // The block runtime would implicitly capture the reference, not the object
+  // it's referencing. Copy the path into a local, so that the value is
+  // captured by the block's scope.
+  const FilePath path_copy(path);
+
+  dispatch_async(queue_, ^{
+      StartEventStream(start_event, path_copy);
+  });
   return true;
 }
 
@@ -108,10 +107,12 @@
   set_cancelled();
   callback_.Reset();
 
-  // Switch to the dispatch queue thread to tear down the event stream.
-  g_task_runner.Get().PostTask(
-      FROM_HERE,
-      Bind(&FilePathWatcherFSEvents::CancelOnMessageLoopThread, this));
+  // Switch to the dispatch queue to tear down the event stream. As the queue
+  // is owned by this object, and this method is called from the destructor,
+  // execute the block synchronously.
+  dispatch_sync(queue_, ^{
+      CancelOnMessageLoopThread();
+  });
 }
 
 // static
@@ -124,8 +125,6 @@
     const FSEventStreamEventId event_ids[]) {
   FilePathWatcherFSEvents* watcher =
       reinterpret_cast<FilePathWatcherFSEvents*>(event_watcher);
-  DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
-
   bool root_changed = watcher->ResolveTargetPath();
   std::vector<FilePath> paths;
   FSEventStreamEventId root_change_at = FSEventStreamGetLatestEventId(stream);
@@ -144,10 +143,9 @@
   if (root_changed) {
     // Resetting the event stream from within the callback fails (FSEvents spews
     // bad file descriptor errors), so post a task to do the reset.
-    g_task_runner.Get().PostTask(
-        FROM_HERE,
-        Bind(&FilePathWatcherFSEvents::UpdateEventStream, watcher,
-             root_change_at));
+    dispatch_async(watcher->queue_, ^{
+        watcher->UpdateEventStream(root_change_at);
+    });
   }
 
   watcher->OnFilePathsChanged(paths);
@@ -165,7 +163,6 @@
 
 void FilePathWatcherFSEvents::OnFilePathsChanged(
     const std::vector<FilePath>& paths) {
-  DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
   DCHECK(!resolved_target_.empty());
   task_runner()->PostTask(
       FROM_HERE, Bind(&FilePathWatcherFSEvents::DispatchEvents, this, paths,
@@ -194,7 +191,6 @@
   // For all other implementations, the "message loop thread" is the IO thread,
   // as returned by task_runner(). This implementation, however, needs to
   // cancel pending work on the Dispatch Queue thread.
-  DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
 
   if (fsevent_stream_) {
     DestroyEventStream();
@@ -205,8 +201,6 @@
 
 void FilePathWatcherFSEvents::UpdateEventStream(
     FSEventStreamEventId start_event) {
-  DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
-
   // It can happen that the watcher gets canceled while tasks that call this
   // function are still in flight, so abort if this situation is detected.
   if (resolved_target_.empty())
@@ -237,8 +231,7 @@
                                         start_event,
                                         kEventLatencySeconds,
                                         kFSEventStreamCreateFlagWatchRoot);
-  FSEventStreamSetDispatchQueue(fsevent_stream_,
-                                g_task_runner.Get().GetDispatchQueue());
+  FSEventStreamSetDispatchQueue(fsevent_stream_, queue_);
 
   if (!FSEventStreamStart(fsevent_stream_)) {
     task_runner()->PostTask(
@@ -247,7 +240,6 @@
 }
 
 bool FilePathWatcherFSEvents::ResolveTargetPath() {
-  DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
   FilePath resolved = ResolvePath(target_).StripTrailingSeparators();
   bool changed = resolved != resolved_target_;
   resolved_target_ = resolved;
@@ -274,7 +266,6 @@
 
 void FilePathWatcherFSEvents::StartEventStream(FSEventStreamEventId start_event,
                                                const FilePath& path) {
-  DCHECK(g_task_runner.Get().RunsTasksOnCurrentThread());
   DCHECK(resolved_target_.empty());
 
   target_ = path;
diff --git a/base/files/file_path_watcher_fsevents.h b/base/files/file_path_watcher_fsevents.h
index 1ebe463..cfbe020 100644
--- a/base/files/file_path_watcher_fsevents.h
+++ b/base/files/file_path_watcher_fsevents.h
@@ -12,6 +12,7 @@
 
 #include "base/files/file_path.h"
 #include "base/files/file_path_watcher.h"
+#include "base/mac/scoped_dispatch_object.h"
 #include "base/macros.h"
 
 namespace base {
@@ -76,16 +77,19 @@
   // (Only accessed from the message_loop() thread.)
   FilePathWatcher::Callback callback_;
 
+  // The dispatch queue on which the event stream is scheduled.
+  ScopedDispatchObject<dispatch_queue_t> queue_;
+
   // Target path to watch (passed to callback).
-  // (Only accessed from the libdispatch thread.)
+  // (Only accessed from the libdispatch queue.)
   FilePath target_;
 
   // Target path with all symbolic links resolved.
-  // (Only accessed from the libdispatch thread.)
+  // (Only accessed from the libdispatch queue.)
   FilePath resolved_target_;
 
   // Backend stream we receive event callbacks from (strong reference).
-  // (Only accessed from the libdispatch thread.)
+  // (Only accessed from the libdispatch queue.)
   FSEventStreamRef fsevent_stream_;
 
   DISALLOW_COPY_AND_ASSIGN(FilePathWatcherFSEvents);
diff --git a/base/files/file_path_watcher_kqueue.cc b/base/files/file_path_watcher_kqueue.cc
index b6e61ab..6d034cd 100644
--- a/base/files/file_path_watcher_kqueue.cc
+++ b/base/files/file_path_watcher_kqueue.cc
@@ -12,7 +12,7 @@
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/strings/stringprintf.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 // On some platforms these are not defined.
 #if !defined(EV_RECEIPT)
diff --git a/base/files/file_path_watcher_linux.cc b/base/files/file_path_watcher_linux.cc
index a75eaba..87bddd3 100644
--- a/base/files/file_path_watcher_linux.cc
+++ b/base/files/file_path_watcher_linux.cc
@@ -14,6 +14,7 @@
 
 #include <algorithm>
 #include <map>
+#include <memory>
 #include <set>
 #include <utility>
 #include <vector>
@@ -27,13 +28,12 @@
 #include "base/location.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/single_thread_task_runner.h"
 #include "base/stl_util.h"
 #include "base/synchronization/lock.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
 
 namespace base {
@@ -677,7 +677,7 @@
     if (watches_[i].subdir.empty())
       return false;
   }
-  return watches_[watches_.size() - 1].subdir.empty();
+  return watches_.back().subdir.empty();
 }
 
 }  // namespace
diff --git a/base/files/file_path_watcher_unittest.cc b/base/files/file_path_watcher_unittest.cc
index a860b13..a40e485 100644
--- a/base/files/file_path_watcher_unittest.cc
+++ b/base/files/file_path_watcher_unittest.cc
@@ -28,8 +28,8 @@
 #include "base/synchronization/waitable_event.h"
 #include "base/test/test_file_util.h"
 #include "base/test/test_timeouts.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -196,7 +196,11 @@
 
   bool WaitForEvents() WARN_UNUSED_RESULT {
     collector_->Reset();
-    loop_.Run();
+    // Make sure we timeout if we don't get notified.
+    loop_.PostDelayedTask(FROM_HERE,
+                          MessageLoop::QuitWhenIdleClosure(),
+                          TestTimeouts::action_timeout());
+    RunLoop().Run();
     return collector_->Success();
   }
 
@@ -215,7 +219,8 @@
                                      FilePathWatcher* watcher,
                                      TestDelegateBase* delegate,
                                      bool recursive_watch) {
-  base::WaitableEvent completion(false, false);
+  base::WaitableEvent completion(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
   bool result;
   file_thread_.task_runner()->PostTask(
       FROM_HERE, base::Bind(SetupWatchCallback, target, watcher, delegate,
@@ -227,7 +232,7 @@
 // Basic test: Create the file and verify that we notice.
 TEST_F(FilePathWatcherTest, NewFile) {
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   ASSERT_TRUE(WriteFile(test_file(), "content"));
@@ -240,7 +245,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
 
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the file is modified.
@@ -255,7 +260,7 @@
   ASSERT_TRUE(WriteFile(source_file, "content"));
 
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the file is modified.
@@ -268,7 +273,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
 
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the file is deleted.
@@ -296,7 +301,7 @@
   FilePathWatcher* watcher() const { return watcher_.get(); }
 
  private:
-  scoped_ptr<FilePathWatcher> watcher_;
+  std::unique_ptr<FilePathWatcher> watcher_;
   MessageLoop* loop_;
 
   DISALLOW_COPY_AND_ASSIGN(Deleter);
@@ -306,7 +311,7 @@
 TEST_F(FilePathWatcherTest, DeleteDuringNotify) {
   FilePathWatcher* watcher = new FilePathWatcher;
   // Takes ownership of watcher.
-  scoped_ptr<Deleter> deleter(new Deleter(watcher, &loop_));
+  std::unique_ptr<Deleter> deleter(new Deleter(watcher, &loop_));
   ASSERT_TRUE(SetupWatch(test_file(), watcher, deleter.get(), false));
 
   ASSERT_TRUE(WriteFile(test_file(), "content"));
@@ -321,7 +326,7 @@
 // notification.
 // Flaky on MacOS (and ARM linux): http://crbug.com/85930
 TEST_F(FilePathWatcherTest, DISABLED_DestroyWithPendingNotification) {
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   FilePathWatcher* watcher = new FilePathWatcher;
   ASSERT_TRUE(SetupWatch(test_file(), watcher, delegate.get(), false));
   ASSERT_TRUE(WriteFile(test_file(), "content"));
@@ -331,8 +336,8 @@
 
 TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
   FilePathWatcher watcher1, watcher2;
-  scoped_ptr<TestDelegate> delegate1(new TestDelegate(collector()));
-  scoped_ptr<TestDelegate> delegate2(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate1(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate2(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher1, delegate1.get(), false));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher2, delegate2.get(), false));
 
@@ -348,7 +353,7 @@
   FilePathWatcher watcher;
   FilePath dir(temp_dir_.path().AppendASCII("dir"));
   FilePath file(dir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
 
   ASSERT_TRUE(base::CreateDirectory(dir));
@@ -381,7 +386,7 @@
 
   FilePathWatcher watcher;
   FilePath file(path.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
 
   FilePath sub_path(temp_dir_.path());
@@ -411,7 +416,7 @@
   FilePath file(dir.AppendASCII("file"));
   ASSERT_TRUE(base::CreateDirectory(dir));
   ASSERT_TRUE(WriteFile(file, "content"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
 
   ASSERT_TRUE(base::DeleteFile(dir, true));
@@ -423,7 +428,7 @@
 TEST_F(FilePathWatcherTest, DeleteAndRecreate) {
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   ASSERT_TRUE(base::DeleteFile(test_file(), false));
@@ -441,7 +446,7 @@
   FilePath dir(temp_dir_.path().AppendASCII("dir"));
   FilePath file1(dir.AppendASCII("file1"));
   FilePath file2(dir.AppendASCII("file2"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(dir, &watcher, delegate.get(), false));
 
   ASSERT_TRUE(base::CreateDirectory(dir));
@@ -476,9 +481,9 @@
   FilePath dest(temp_dir_.path().AppendASCII("dest"));
   FilePath subdir(dir.AppendASCII("subdir"));
   FilePath file(subdir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &file_watcher, file_delegate.get(), false));
-  scoped_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(subdir, &subdir_watcher, subdir_delegate.get(),
                          false));
 
@@ -499,7 +504,7 @@
 TEST_F(FilePathWatcherTest, RecursiveWatch) {
   FilePathWatcher watcher;
   FilePath dir(temp_dir_.path().AppendASCII("dir"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   bool setup_result = SetupWatch(dir, &watcher, delegate.get(), true);
   if (!FilePathWatcher::RecursiveWatchAvailable()) {
     ASSERT_FALSE(setup_result);
@@ -579,7 +584,7 @@
   FilePath test_dir(temp_dir_.path().AppendASCII("test_dir"));
   ASSERT_TRUE(base::CreateDirectory(test_dir));
   FilePath symlink(test_dir.AppendASCII("symlink"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(symlink, &watcher, delegate.get(), true));
 
   // Link creation.
@@ -626,9 +631,9 @@
   ASSERT_TRUE(base::CreateDirectory(source_subdir));
   ASSERT_TRUE(WriteFile(source_file, "content"));
 
-  scoped_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(dest_file, &file_watcher, file_delegate.get(), false));
-  scoped_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(dest_subdir, &subdir_watcher, subdir_delegate.get(),
                          false));
 
@@ -651,7 +656,7 @@
 TEST_F(FilePathWatcherTest, FileAttributesChanged) {
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the file is modified.
@@ -665,7 +670,7 @@
 // Verify that creating a symlink is caught.
 TEST_F(FilePathWatcherTest, CreateLink) {
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Note that we are watching the symlink
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
@@ -683,7 +688,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the link is deleted.
@@ -698,7 +703,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Note that we are watching the symlink.
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
@@ -713,7 +718,7 @@
 TEST_F(FilePathWatcherTest, CreateTargetLinkedFile) {
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Note that we are watching the symlink.
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
@@ -729,7 +734,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Note that we are watching the symlink.
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
@@ -747,7 +752,7 @@
   FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // dir/file should exist.
   ASSERT_TRUE(base::CreateDirectory(dir));
   ASSERT_TRUE(WriteFile(file, "content"));
@@ -776,7 +781,7 @@
   FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Now create the link from dir.lnk pointing to dir but
   // neither dir nor dir/file exist yet.
   ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
@@ -806,7 +811,7 @@
   FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(base::CreateDirectory(dir));
   ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
   // Note that we are watching dir.lnk/file but the file doesn't exist yet.
@@ -883,15 +888,15 @@
   ASSERT_TRUE(WriteFile(test_file, "content"));
 
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file, &watcher, delegate.get(), false));
 
   // We should not get notified in this case as it hasn't affected our ability
   // to access the file.
   ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, false));
-  loop_.PostDelayedTask(FROM_HERE,
-                        MessageLoop::QuitWhenIdleClosure(),
-                        TestTimeouts::tiny_timeout());
+  loop_.task_runner()->PostDelayedTask(FROM_HERE,
+                                       MessageLoop::QuitWhenIdleClosure(),
+                                       TestTimeouts::tiny_timeout());
   ASSERT_FALSE(WaitForEvents());
   ASSERT_TRUE(ChangeFilePermissions(test_dir1, Read, true));
 
diff --git a/base/files/file_util.cc b/base/files/file_util.cc
index 9e35b67..80fa44f 100644
--- a/base/files/file_util.cc
+++ b/base/files/file_util.cc
@@ -124,9 +124,9 @@
 }
 #endif  // !defined(OS_NACL_NONSFI)
 
-bool ReadFileToString(const FilePath& path,
-                      std::string* contents,
-                      size_t max_size) {
+bool ReadFileToStringWithMaxSize(const FilePath& path,
+                                 std::string* contents,
+                                 size_t max_size) {
   if (contents)
     contents->clear();
   if (path.ReferencesParent())
@@ -137,7 +137,7 @@
   }
 
   const size_t kBufferSize = 1 << 16;
-  scoped_ptr<char[]> buf(new char[kBufferSize]);
+  std::unique_ptr<char[]> buf(new char[kBufferSize]);
   size_t len;
   size_t size = 0;
   bool read_status = true;
@@ -162,7 +162,8 @@
 }
 
 bool ReadFileToString(const FilePath& path, std::string* contents) {
-  return ReadFileToString(path, contents, std::numeric_limits<size_t>::max());
+  return ReadFileToStringWithMaxSize(path, contents,
+                                     std::numeric_limits<size_t>::max());
 }
 
 #if !defined(OS_NACL_NONSFI)
diff --git a/base/files/file_util.h b/base/files/file_util.h
index dfc10a3..420dcae 100644
--- a/base/files/file_util.h
+++ b/base/files/file_util.h
@@ -19,7 +19,6 @@
 #include "base/base_export.h"
 #include "base/files/file.h"
 #include "base/files/file_path.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string16.h"
 #include "build/build_config.h"
 
@@ -38,6 +37,7 @@
 
 namespace base {
 
+class Environment;
 class Time;
 
 //-----------------------------------------------------------------------------
@@ -154,9 +154,9 @@
 // |max_size|.
 // |contents| may be NULL, in which case this function is useful for its side
 // effect of priming the disk cache (could be used for unit tests).
-BASE_EXPORT bool ReadFileToString(const FilePath& path,
-                                  std::string* contents,
-                                  size_t max_size);
+BASE_EXPORT bool ReadFileToStringWithMaxSize(const FilePath& path,
+                                             std::string* contents,
+                                             size_t max_size);
 
 #if defined(OS_POSIX)
 
@@ -200,6 +200,11 @@
 // the permission of a file which the symlink points to.
 BASE_EXPORT bool SetPosixFilePermissions(const FilePath& path, int mode);
 
+// Returns true iff |executable| can be found in any directory specified by the
+// environment variable in |env|.
+BASE_EXPORT bool ExecutableExistsInPath(Environment* env,
+                                        const FilePath::StringType& executable);
+
 #endif  // OS_POSIX
 
 // Returns true if the given directory is empty
@@ -289,6 +294,10 @@
 // be resolved with this function.
 BASE_EXPORT bool NormalizeToNativeFilePath(const FilePath& path,
                                            FilePath* nt_path);
+
+// Given an existing file in |path|, returns whether this file is on a network
+// drive or not. If |path| does not exist, this function returns false.
+BASE_EXPORT bool IsOnNetworkDrive(const base::FilePath& path);
 #endif
 
 // This function will return if the given file is a symlink or not.
diff --git a/base/files/file_util_linux.cc b/base/files/file_util_linux.cc
index 7999392..b0c6e03 100644
--- a/base/files/file_util_linux.cc
+++ b/base/files/file_util_linux.cc
@@ -24,6 +24,9 @@
 
   // Not all possible |statfs_buf.f_type| values are in linux/magic.h.
   // Missing values are copied from the statfs man page.
+  // In some platforms, |statfs_buf.f_type| is declared as signed, but some of
+  // the values will overflow it, causing narrowing warnings. Cast to the
+  // largest possible unsigned integer type to avoid it.
   switch (static_cast<uintmax_t>(statfs_buf.f_type)) {
     case 0:
       *type = FILE_SYSTEM_0;
diff --git a/base/files/file_util_posix.cc b/base/files/file_util_posix.cc
index e2e4446..85a1b41 100644
--- a/base/files/file_util_posix.cc
+++ b/base/files/file_util_posix.cc
@@ -22,15 +22,16 @@
 #include <time.h>
 #include <unistd.h>
 
+#include "base/environment.h"
 #include "base/files/file_enumerator.h"
 #include "base/files/file_path.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/singleton.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/stl_util.h"
+#include "base/strings/string_split.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/strings/sys_string_conversions.h"
@@ -351,12 +352,12 @@
 #endif  // !defined(OS_NACL_NONSFI)
 
 bool SetNonBlocking(int fd) {
-  int flags = fcntl(fd, F_GETFL, 0);
+  const int flags = fcntl(fd, F_GETFL);
   if (flags == -1)
     return false;
   if (flags & O_NONBLOCK)
     return true;
-  if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
+  if (HANDLE_EINTR(fcntl(fd, F_SETFL, flags | O_NONBLOCK)) == -1)
     return false;
   return true;
 }
@@ -456,6 +457,25 @@
   return true;
 }
 
+bool ExecutableExistsInPath(Environment* env,
+                            const FilePath::StringType& executable) {
+  std::string path;
+  if (!env->GetVar("PATH", &path)) {
+    LOG(ERROR) << "No $PATH variable. Assuming no " << executable << ".";
+    return false;
+  }
+
+  for (const StringPiece& cur_path :
+       SplitStringPiece(path, ":", KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY)) {
+    FilePath file(cur_path);
+    int permissions;
+    if (GetPosixFilePermissions(file.Append(executable), &permissions) &&
+        (permissions & FILE_PERMISSION_EXECUTE_BY_USER))
+      return true;
+  }
+  return false;
+}
+
 #if !defined(OS_MACOSX)
 // This is implemented in file_util_mac.mm for Mac.
 bool GetTempDir(FilePath* path) {
@@ -560,7 +580,7 @@
   return CreateTemporaryDirInDirImpl(base_dir, mkdtemp_template, new_dir);
 }
 
-bool CreateNewTempDirectory(const FilePath::StringType& /* prefix */,
+bool CreateNewTempDirectory(const FilePath::StringType& /*prefix*/,
                             FilePath* new_temp_path) {
   FilePath tmpdir;
   if (!GetTempDir(&tmpdir))
@@ -849,7 +869,7 @@
     return true;
   }
 #else
-  (void)executable;  // avoid unused warning when !defined(OS_LINUX)
+  (void)executable;  // Avoid unused warning when !defined(OS_LINUX).
 #endif
   return GetTempDir(path);
 }
diff --git a/base/files/important_file_writer.cc b/base/files/important_file_writer.cc
index b429305..28550ad 100644
--- a/base/files/important_file_writer.cc
+++ b/base/files/important_file_writer.cc
@@ -49,15 +49,16 @@
 };
 
 void LogFailure(const FilePath& path, TempFileFailure failure_code,
-                const std::string& message) {
+                StringPiece message) {
   UMA_HISTOGRAM_ENUMERATION("ImportantFile.TempFileFailures", failure_code,
                             TEMP_FILE_FAILURE_MAX);
   DPLOG(WARNING) << "temp file failure: " << path.value() << " : " << message;
 }
 
-// Helper function to call WriteFileAtomically() with a scoped_ptr<std::string>.
+// Helper function to call WriteFileAtomically() with a
+// std::unique_ptr<std::string>.
 bool WriteScopedStringToFileAtomically(const FilePath& path,
-                                       scoped_ptr<std::string> data) {
+                                       std::unique_ptr<std::string> data) {
   return ImportantFileWriter::WriteFileAtomically(path, *data);
 }
 
@@ -65,7 +66,7 @@
 
 // static
 bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
-                                              const std::string& data) {
+                                              StringPiece data) {
 #if defined(OS_CHROMEOS)
   // On Chrome OS, chrome gets killed when it cannot finish shutdown quickly,
   // and this function seems to be one of the slowest shutdown steps.
@@ -126,19 +127,18 @@
 
 ImportantFileWriter::ImportantFileWriter(
     const FilePath& path,
-    const scoped_refptr<SequencedTaskRunner>& task_runner)
+    scoped_refptr<SequencedTaskRunner> task_runner)
     : ImportantFileWriter(
-        path,
-        task_runner,
-        TimeDelta::FromMilliseconds(kDefaultCommitIntervalMs)) {
-}
+          path,
+          std::move(task_runner),
+          TimeDelta::FromMilliseconds(kDefaultCommitIntervalMs)) {}
 
 ImportantFileWriter::ImportantFileWriter(
     const FilePath& path,
-    const scoped_refptr<SequencedTaskRunner>& task_runner,
+    scoped_refptr<SequencedTaskRunner> task_runner,
     TimeDelta interval)
     : path_(path),
-      task_runner_(task_runner),
+      task_runner_(std::move(task_runner)),
       serializer_(nullptr),
       commit_interval_(interval),
       weak_factory_(this) {
@@ -158,7 +158,7 @@
   return timer_.IsRunning();
 }
 
-void ImportantFileWriter::WriteNow(scoped_ptr<std::string> data) {
+void ImportantFileWriter::WriteNow(std::unique_ptr<std::string> data) {
   DCHECK(CalledOnValidThread());
   if (!IsValueInRangeForNumericType<int32_t>(data->length())) {
     NOTREACHED();
@@ -193,7 +193,7 @@
 
 void ImportantFileWriter::DoScheduledWrite() {
   DCHECK(serializer_);
-  scoped_ptr<std::string> data(new std::string);
+  std::unique_ptr<std::string> data(new std::string);
   if (serializer_->SerializeData(data.get())) {
     WriteNow(std::move(data));
   } else {
diff --git a/base/files/important_file_writer.h b/base/files/important_file_writer.h
index 1b2ad5c..0bd8a7f 100644
--- a/base/files/important_file_writer.h
+++ b/base/files/important_file_writer.h
@@ -12,6 +12,7 @@
 #include "base/files/file_path.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
 #include "base/threading/non_thread_safe.h"
 #include "base/time/time.h"
 #include "base/timer/timer.h"
@@ -36,7 +37,7 @@
 // temporary file to target filename.
 //
 // If you want to know more about this approach and ext3/ext4 fsync issues, see
-// http://valhenson.livejournal.com/37921.html
+// http://blog.valerieaurora.org/2009/04/16/dont-panic-fsync-ext34-and-your-data/
 class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
  public:
   // Used by ScheduleSave to lazily provide the data to be saved. Allows us
@@ -54,8 +55,7 @@
 
   // Save |data| to |path| in an atomic manner (see the class comment above).
   // Blocks and writes data on the current thread.
-  static bool WriteFileAtomically(const FilePath& path,
-                                  const std::string& data);
+  static bool WriteFileAtomically(const FilePath& path, StringPiece data);
 
   // Initialize the writer.
   // |path| is the name of file to write.
@@ -63,11 +63,11 @@
   // execute file I/O operations.
   // All non-const methods, ctor and dtor must be called on the same thread.
   ImportantFileWriter(const FilePath& path,
-                      const scoped_refptr<SequencedTaskRunner>& task_runner);
+                      scoped_refptr<SequencedTaskRunner> task_runner);
 
   // Same as above, but with a custom commit interval.
   ImportantFileWriter(const FilePath& path,
-                      const scoped_refptr<SequencedTaskRunner>& task_runner,
+                      scoped_refptr<SequencedTaskRunner> task_runner,
                       TimeDelta interval);
 
   // You have to ensure that there are no pending writes at the moment
@@ -82,7 +82,7 @@
 
   // Save |data| to target filename. Does not block. If there is a pending write
   // scheduled by ScheduleWrite(), it is cancelled.
-  void WriteNow(scoped_ptr<std::string> data);
+  void WriteNow(std::unique_ptr<std::string> data);
 
   // Schedule a save to target filename. Data will be serialized and saved
   // to disk after the commit interval. If another ScheduleWrite is issued
diff --git a/base/files/important_file_writer_unittest.cc b/base/files/important_file_writer_unittest.cc
index 28e6001..43e051e 100644
--- a/base/files/important_file_writer_unittest.cc
+++ b/base/files/important_file_writer_unittest.cc
@@ -12,10 +12,11 @@
 #include "base/location.h"
 #include "base/logging.h"
 #include "base/macros.h"
+#include "base/memory/ptr_util.h"
 #include "base/run_loop.h"
 #include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -103,7 +104,7 @@
   ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
   EXPECT_FALSE(PathExists(writer.path()));
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
-  writer.WriteNow(make_scoped_ptr(new std::string("foo")));
+  writer.WriteNow(WrapUnique(new std::string("foo")));
   RunLoop().RunUntilIdle();
 
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
@@ -116,7 +117,7 @@
   EXPECT_FALSE(PathExists(writer.path()));
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
   successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
-  writer.WriteNow(make_scoped_ptr(new std::string("foo")));
+  writer.WriteNow(WrapUnique(new std::string("foo")));
   RunLoop().RunUntilIdle();
 
   // Confirm that the observer is invoked.
@@ -127,7 +128,7 @@
   // Confirm that re-installing the observer works for another write.
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
   successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
-  writer.WriteNow(make_scoped_ptr(new std::string("bar")));
+  writer.WriteNow(WrapUnique(new std::string("bar")));
   RunLoop().RunUntilIdle();
 
   EXPECT_TRUE(successful_write_observer_.GetAndResetObservationState());
@@ -137,7 +138,7 @@
   // Confirm that writing again without re-installing the observer doesn't
   // result in a notification.
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
-  writer.WriteNow(make_scoped_ptr(new std::string("baz")));
+  writer.WriteNow(WrapUnique(new std::string("baz")));
   RunLoop().RunUntilIdle();
 
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
@@ -156,7 +157,7 @@
   ThreadTaskRunnerHandle::Get()->PostDelayedTask(
       FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
       TimeDelta::FromMilliseconds(100));
-  MessageLoop::current()->Run();
+  RunLoop().Run();
   EXPECT_FALSE(writer.HasPendingWrite());
   ASSERT_TRUE(PathExists(writer.path()));
   EXPECT_EQ("foo", GetFileContent(writer.path()));
@@ -172,7 +173,7 @@
   ThreadTaskRunnerHandle::Get()->PostDelayedTask(
       FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
       TimeDelta::FromMilliseconds(100));
-  MessageLoop::current()->Run();
+  RunLoop().Run();
   EXPECT_FALSE(writer.HasPendingWrite());
   ASSERT_TRUE(PathExists(writer.path()));
   EXPECT_EQ("foo", GetFileContent(writer.path()));
@@ -189,7 +190,7 @@
   ThreadTaskRunnerHandle::Get()->PostDelayedTask(
       FROM_HERE, MessageLoop::QuitWhenIdleClosure(),
       TimeDelta::FromMilliseconds(100));
-  MessageLoop::current()->Run();
+  RunLoop().Run();
   ASSERT_TRUE(PathExists(writer.path()));
   EXPECT_EQ("baz", GetFileContent(writer.path()));
 }
diff --git a/base/files/memory_mapped_file.cc b/base/files/memory_mapped_file.cc
index 0fd9d67..67890d6 100644
--- a/base/files/memory_mapped_file.cc
+++ b/base/files/memory_mapped_file.cc
@@ -30,18 +30,30 @@
 }
 
 #if !defined(OS_NACL)
-bool MemoryMappedFile::Initialize(const FilePath& file_name) {
+bool MemoryMappedFile::Initialize(const FilePath& file_name, Access access) {
   if (IsValid())
     return false;
 
-  file_.Initialize(file_name, File::FLAG_OPEN | File::FLAG_READ);
+  uint32_t flags = 0;
+  switch (access) {
+    case READ_ONLY:
+      flags = File::FLAG_OPEN | File::FLAG_READ;
+      break;
+    case READ_WRITE:
+      flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE;
+      break;
+    case READ_WRITE_EXTEND:
+      // Can't open with "extend" because no maximum size is known.
+      NOTREACHED();
+  }
+  file_.Initialize(file_name, flags);
 
   if (!file_.IsValid()) {
     DLOG(ERROR) << "Couldn't open " << file_name.AsUTF8Unsafe();
     return false;
   }
 
-  if (!MapFileRegionToMemory(Region::kWholeFile)) {
+  if (!MapFileRegionToMemory(Region::kWholeFile, access)) {
     CloseHandles();
     return false;
   }
@@ -49,11 +61,32 @@
   return true;
 }
 
-bool MemoryMappedFile::Initialize(File file) {
-  return Initialize(std::move(file), Region::kWholeFile);
+bool MemoryMappedFile::Initialize(File file, Access access) {
+  DCHECK_NE(READ_WRITE_EXTEND, access);
+  return Initialize(std::move(file), Region::kWholeFile, access);
 }
 
-bool MemoryMappedFile::Initialize(File file, const Region& region) {
+bool MemoryMappedFile::Initialize(File file,
+                                  const Region& region,
+                                  Access access) {
+  switch (access) {
+    case READ_WRITE_EXTEND:
+      // Ensure that the extended size is within limits of File.
+      if (region.size > std::numeric_limits<int64_t>::max() - region.offset) {
+        DLOG(ERROR) << "Region bounds exceed maximum for base::File.";
+        return false;
+      }
+      // Fall through.
+    case READ_ONLY:
+    case READ_WRITE:
+      // Ensure that the region values are valid.
+      if (region.offset < 0 || region.size < 0) {
+        DLOG(ERROR) << "Region bounds are not valid.";
+        return false;
+      }
+      break;
+  }
+
   if (IsValid())
     return false;
 
@@ -64,7 +97,7 @@
 
   file_ = std::move(file);
 
-  if (!MapFileRegionToMemory(region)) {
+  if (!MapFileRegionToMemory(region, access)) {
     CloseHandles();
     return false;
   }
diff --git a/base/files/memory_mapped_file.h b/base/files/memory_mapped_file.h
index 6362e76..cad99f6 100644
--- a/base/files/memory_mapped_file.h
+++ b/base/files/memory_mapped_file.h
@@ -23,6 +23,29 @@
 
 class BASE_EXPORT MemoryMappedFile {
  public:
+  enum Access {
+    // Mapping a file into memory effectively allows for file I/O on any thread.
+    // The accessing thread could be paused while data from the file is paged
+    // into memory. Worse, a corrupted filesystem could cause a SEGV within the
+    // program instead of just an I/O error.
+    READ_ONLY,
+
+    // This provides read/write access to a file and must be used with care of
+    // the additional subtleties involved in doing so. Though the OS will do
+    // the writing of data on its own time, too many dirty pages can cause
+    // the OS to pause the thread while it writes them out. The pause can
+    // be as much as 1s on some systems.
+    READ_WRITE,
+
+    // This provides read/write access but with the ability to write beyond
+    // the end of the existing file up to a maximum size specified as the
+    // "region". Depending on the OS, the file may or may not be immediately
+    // extended to the maximum size though it won't be loaded in RAM until
+    // needed. Note, however, that the maximum size will still be reserved
+    // in the process address space.
+    READ_WRITE_EXTEND,
+  };
+
   // The default constructor sets all members to invalid/null values.
   MemoryMappedFile();
   ~MemoryMappedFile();
@@ -41,27 +64,37 @@
     int64_t size;
   };
 
-  // Opens an existing file and maps it into memory. Access is restricted to
-  // read only. If this object already points to a valid memory mapped file
-  // then this method will fail and return false. If it cannot open the file,
-  // the file does not exist, or the memory mapping fails, it will return false.
-  // Later we may want to allow the user to specify access.
-  bool Initialize(const FilePath& file_name);
+  // Opens an existing file and maps it into memory. |access| can be read-only
+  // or read/write but not read/write+extend. If this object already points
+  // to a valid memory mapped file then this method will fail and return
+  // false. If it cannot open the file, the file does not exist, or the
+  // memory mapping fails, it will return false.
+  bool Initialize(const FilePath& file_name, Access access);
+  bool Initialize(const FilePath& file_name) {
+    return Initialize(file_name, READ_ONLY);
+  }
 
-  // As above, but works with an already-opened file. MemoryMappedFile takes
-  // ownership of |file| and closes it when done.
-  bool Initialize(File file);
+  // As above, but works with an already-opened file. |access| can be read-only
+  // or read/write but not read/write+extend. MemoryMappedFile takes ownership
+  // of |file| and closes it when done. |file| must have been opened with
+  // permissions suitable for |access|. If the memory mapping fails, it will
+  // return false.
+  bool Initialize(File file, Access access);
+  bool Initialize(File file) {
+    return Initialize(std::move(file), READ_ONLY);
+  }
 
-  // As above, but works with a region of an already-opened file.
-  bool Initialize(File file, const Region& region);
-
-#if defined(OS_WIN)
-  // Opens an existing file and maps it as an image section. Please refer to
-  // the Initialize function above for additional information.
-  bool InitializeAsImageSection(const FilePath& file_name);
-#endif  // OS_WIN
+  // As above, but works with a region of an already-opened file. All forms of
+  // |access| are allowed. If READ_WRITE_EXTEND is specified then |region|
+  // provides the maximum size of the file. If the memory mapping fails, it
+  // returns false.
+  bool Initialize(File file, const Region& region, Access access);
+  bool Initialize(File file, const Region& region) {
+    return Initialize(std::move(file), region, READ_ONLY);
+  }
 
   const uint8_t* data() const { return data_; }
+  uint8_t* data() { return data_; }
   size_t length() const { return length_; }
 
   // Is file_ a valid file handle that points to an open, memory mapped file?
@@ -82,7 +115,7 @@
 
   // Map the file to memory, set data_ to that memory address. Return true on
   // success, false on any kind of failure. This is a helper for Initialize().
-  bool MapFileRegionToMemory(const Region& region);
+  bool MapFileRegionToMemory(const Region& region, Access access);
 
   // Closes all open handles.
   void CloseHandles();
@@ -93,7 +126,6 @@
 
 #if defined(OS_WIN)
   win::ScopedHandle file_mapping_;
-  bool image_;  // Map as an image.
 #endif
 
   DISALLOW_COPY_AND_ASSIGN(MemoryMappedFile);
diff --git a/base/files/memory_mapped_file_posix.cc b/base/files/memory_mapped_file_posix.cc
index 1067fdc..4899cf0 100644
--- a/base/files/memory_mapped_file_posix.cc
+++ b/base/files/memory_mapped_file_posix.cc
@@ -21,7 +21,8 @@
 
 #if !defined(OS_NACL)
 bool MemoryMappedFile::MapFileRegionToMemory(
-    const MemoryMappedFile::Region& region) {
+    const MemoryMappedFile::Region& region,
+    Access access) {
   ThreadRestrictions::AssertIOAllowed();
 
   off_t map_start = 0;
@@ -65,7 +66,23 @@
     length_ = static_cast<size_t>(region.size);
   }
 
-  data_ = static_cast<uint8_t*>(mmap(NULL, map_size, PROT_READ, MAP_SHARED,
+  int flags = 0;
+  switch (access) {
+    case READ_ONLY:
+      flags |= PROT_READ;
+      break;
+    case READ_WRITE:
+      flags |= PROT_READ | PROT_WRITE;
+      break;
+    case READ_WRITE_EXTEND:
+      // POSIX won't auto-extend the file when it is written so it must first
+      // be explicitly extended to the maximum size. Zeros will fill the new
+      // space.
+      file_.SetLength(std::max(file_.GetLength(), region.offset + region.size));
+      flags |= PROT_READ | PROT_WRITE;
+      break;
+  }
+  data_ = static_cast<uint8_t*>(mmap(NULL, map_size, flags, MAP_SHARED,
                                      file_.GetPlatformFile(), map_start));
   if (data_ == MAP_FAILED) {
     DPLOG(ERROR) << "mmap " << file_.GetPlatformFile();
diff --git a/base/files/scoped_file.cc b/base/files/scoped_file.cc
index 8971280..8ce45b8 100644
--- a/base/files/scoped_file.cc
+++ b/base/files/scoped_file.cc
@@ -8,8 +8,10 @@
 #include "build/build_config.h"
 
 #if defined(OS_POSIX)
+#include <errno.h>
 #include <unistd.h>
 
+#include "base/debug/alias.h"
 #include "base/posix/eintr_wrapper.h"
 #endif
 
@@ -27,7 +29,15 @@
   // Chrome relies on being able to "drop" such access.
   // It's especially problematic on Linux with the setuid sandbox, where
   // a single open directory would bypass the entire security model.
-  PCHECK(0 == IGNORE_EINTR(close(fd)));
+  int ret = IGNORE_EINTR(close(fd));
+
+  // TODO(davidben): Remove this once it's been determined whether
+  // https://crbug.com/603354 is caused by EBADF or a network filesystem
+  // returning some other error.
+  int close_errno = errno;
+  base::debug::Alias(&close_errno);
+
+  PCHECK(0 == ret);
 }
 
 #endif  // OS_POSIX
diff --git a/base/files/scoped_file.h b/base/files/scoped_file.h
index 106f6ad..68c0415 100644
--- a/base/files/scoped_file.h
+++ b/base/files/scoped_file.h
@@ -7,9 +7,10 @@
 
 #include <stdio.h>
 
+#include <memory>
+
 #include "base/base_export.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/scoped_generic.h"
 #include "build/build_config.h"
 
@@ -54,7 +55,7 @@
 #endif
 
 // Automatically closes |FILE*|s.
-typedef scoped_ptr<FILE, internal::ScopedFILECloser> ScopedFILE;
+typedef std::unique_ptr<FILE, internal::ScopedFILECloser> ScopedFILE;
 
 }  // namespace base
 
diff --git a/base/guid.cc b/base/guid.cc
index 99b037b..5714073 100644
--- a/base/guid.cc
+++ b/base/guid.cc
@@ -5,12 +5,21 @@
 #include "base/guid.h"
 
 #include <stddef.h>
+#include <stdint.h>
 
+#include "base/rand_util.h"
 #include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
 
 namespace base {
 
-bool IsValidGUID(const std::string& guid) {
+namespace {
+
+bool IsLowerHexDigit(char c) {
+  return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f');
+}
+
+bool IsValidGUIDInternal(const base::StringPiece& guid, bool strict) {
   const size_t kGUIDLength = 36U;
   if (guid.length() != kGUIDLength)
     return false;
@@ -21,7 +30,7 @@
       if (current != '-')
         return false;
     } else {
-      if (!IsHexDigit(current))
+      if ((strict && !IsLowerHexDigit(current)) || !IsHexDigit(current))
         return false;
     }
   }
@@ -29,4 +38,42 @@
   return true;
 }
 
+}  // namespace
+
+std::string GenerateGUID() {
+  uint64_t sixteen_bytes[2] = {base::RandUint64(), base::RandUint64()};
+
+  // Set the GUID to version 4 as described in RFC 4122, section 4.4.
+  // The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+  // where y is one of [8, 9, A, B].
+
+  // Clear the version bits and set the version to 4:
+  sixteen_bytes[0] &= 0xffffffffffff0fffULL;
+  sixteen_bytes[0] |= 0x0000000000004000ULL;
+
+  // Set the two most significant bits (bits 6 and 7) of the
+  // clock_seq_hi_and_reserved to zero and one, respectively:
+  sixteen_bytes[1] &= 0x3fffffffffffffffULL;
+  sixteen_bytes[1] |= 0x8000000000000000ULL;
+
+  return RandomDataToGUIDString(sixteen_bytes);
+}
+
+bool IsValidGUID(const base::StringPiece& guid) {
+  return IsValidGUIDInternal(guid, false /* strict */);
+}
+
+bool IsValidGUIDOutputString(const base::StringPiece& guid) {
+  return IsValidGUIDInternal(guid, true /* strict */);
+}
+
+std::string RandomDataToGUIDString(const uint64_t bytes[2]) {
+  return StringPrintf("%08x-%04x-%04x-%04x-%012llx",
+                      static_cast<unsigned int>(bytes[0] >> 32),
+                      static_cast<unsigned int>((bytes[0] >> 16) & 0x0000ffff),
+                      static_cast<unsigned int>(bytes[0] & 0x0000ffff),
+                      static_cast<unsigned int>(bytes[1] >> 48),
+                      bytes[1] & 0x0000ffffffffffffULL);
+}
+
 }  // namespace base
diff --git a/base/guid.h b/base/guid.h
index c0a06f8..29c24ea 100644
--- a/base/guid.h
+++ b/base/guid.h
@@ -10,23 +10,33 @@
 #include <string>
 
 #include "base/base_export.h"
+#include "base/strings/string_piece.h"
 #include "build/build_config.h"
 
 namespace base {
 
-// Generate a 128-bit random GUID of the form: "%08X-%04X-%04X-%04X-%012llX".
+// Generate a 128-bit (pseudo) random GUID in the form of version 4 as described
+// in RFC 4122, section 4.4.
+// The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+// where y is one of [8, 9, A, B].
+// The hexadecimal values "a" through "f" are output as lower case characters.
 // If GUID generation fails an empty string is returned.
-// The POSIX implementation uses pseudo random number generation to create
-// the GUID.  The Windows implementation uses system services.
 BASE_EXPORT std::string GenerateGUID();
 
-// Returns true if the input string conforms to the GUID format.
-BASE_EXPORT bool IsValidGUID(const std::string& guid);
+// Returns true if the input string conforms to the version 4 GUID format.
+// Note that this does NOT check if the hexadecimal values "a" through "f"
+// are in lower case characters, as Version 4 RFC says on input they're
+// case insensitive. (Use IsValidGUIDOutputString for checking if the
+// given string is a valid output string)
+BASE_EXPORT bool IsValidGUID(const base::StringPiece& guid);
 
-#if defined(OS_POSIX)
+// Returns true if the input string is valid version 4 GUID output string.
+// This also checks if the hexadecimal values "a" through "f" are in lower
+// case characters.
+BASE_EXPORT bool IsValidGUIDOutputString(const base::StringPiece& guid);
+
 // For unit testing purposes only.  Do not use outside of tests.
 BASE_EXPORT std::string RandomDataToGUIDString(const uint64_t bytes[2]);
-#endif
 
 }  // namespace base
 
diff --git a/base/guid_posix.cc b/base/guid_posix.cc
deleted file mode 100644
index ec1ca51..0000000
--- a/base/guid_posix.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/guid.h"
-
-#include <stdint.h>
-
-#include "base/rand_util.h"
-#include "base/strings/stringprintf.h"
-
-namespace base {
-
-std::string GenerateGUID() {
-  uint64_t sixteen_bytes[2] = {base::RandUint64(), base::RandUint64()};
-
-  // Set the GUID to version 4 as described in RFC 4122, section 4.4.
-  // The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
-  // where y is one of [8, 9, A, B].
-
-  // Clear the version bits and set the version to 4:
-  sixteen_bytes[0] &= 0xffffffffffff0fffULL;
-  sixteen_bytes[0] |= 0x0000000000004000ULL;
-
-  // Set the two most significant bits (bits 6 and 7) of the
-  // clock_seq_hi_and_reserved to zero and one, respectively:
-  sixteen_bytes[1] &= 0x3fffffffffffffffULL;
-  sixteen_bytes[1] |= 0x8000000000000000ULL;
-
-  return RandomDataToGUIDString(sixteen_bytes);
-}
-
-// TODO(cmasone): Once we're comfortable this works, migrate Windows code to
-// use this as well.
-std::string RandomDataToGUIDString(const uint64_t bytes[2]) {
-  return StringPrintf("%08X-%04X-%04X-%04X-%012llX",
-                      static_cast<unsigned int>(bytes[0] >> 32),
-                      static_cast<unsigned int>((bytes[0] >> 16) & 0x0000ffff),
-                      static_cast<unsigned int>(bytes[0] & 0x0000ffff),
-                      static_cast<unsigned int>(bytes[1] >> 48),
-                      bytes[1] & 0x0000ffffffffffffULL);
-}
-
-}  // namespace base
diff --git a/base/guid_unittest.cc b/base/guid_unittest.cc
index b6d976d..70dad67 100644
--- a/base/guid_unittest.cc
+++ b/base/guid_unittest.cc
@@ -14,8 +14,6 @@
 
 namespace base {
 
-#if defined(OS_POSIX)
-
 namespace {
 
 bool IsGUIDv4(const std::string& guid) {
@@ -37,15 +35,15 @@
 TEST(GUIDTest, GUIDGeneratesCorrectly) {
   uint64_t bytes[] = {0x0123456789ABCDEFULL, 0xFEDCBA9876543210ULL};
   std::string clientid = RandomDataToGUIDString(bytes);
-  EXPECT_EQ("01234567-89AB-CDEF-FEDC-BA9876543210", clientid);
+  EXPECT_EQ("01234567-89ab-cdef-fedc-ba9876543210", clientid);
 }
-#endif
 
 TEST(GUIDTest, GUIDCorrectlyFormatted) {
   const int kIterations = 10;
   for (int it = 0; it < kIterations; ++it) {
     std::string guid = GenerateGUID();
     EXPECT_TRUE(IsValidGUID(guid));
+    EXPECT_TRUE(IsValidGUIDOutputString(guid));
     EXPECT_TRUE(IsValidGUID(ToLowerASCII(guid)));
     EXPECT_TRUE(IsValidGUID(ToUpperASCII(guid)));
   }
@@ -59,10 +57,8 @@
     EXPECT_EQ(36U, guid1.length());
     EXPECT_EQ(36U, guid2.length());
     EXPECT_NE(guid1, guid2);
-#if defined(OS_POSIX)
     EXPECT_TRUE(IsGUIDv4(guid1));
     EXPECT_TRUE(IsGUIDv4(guid2));
-#endif
   }
 }
 
diff --git a/base/hash.cc b/base/hash.cc
new file mode 100644
index 0000000..4dfd0d0
--- /dev/null
+++ b/base/hash.cc
@@ -0,0 +1,16 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/hash.h"
+
+#include <functional>
+
+namespace base {
+
+uint32_t SuperFastHash(const char* data, size_t len) {
+  std::hash<std::string> hash_fn;
+  return hash_fn(std::string(data, len));
+}
+
+}  // namespace base
diff --git a/base/hash.h b/base/hash.h
index 9c0e7a5..7c0fba6 100644
--- a/base/hash.h
+++ b/base/hash.h
@@ -5,21 +5,114 @@
 #ifndef BASE_HASH_H_
 #define BASE_HASH_H_
 
-#include <cstdint>
-#include <functional>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
 #include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/logging.h"
 
 namespace base {
 
-// Deprecated: just use std::hash directly
-//
+// WARNING: This hash function should not be used for any cryptographic purpose.
+BASE_EXPORT uint32_t SuperFastHash(const char* data, size_t length);
+
+// Computes a hash of a memory buffer |data| of a given |length|.
+// WARNING: This hash function should not be used for any cryptographic purpose.
+inline uint32_t Hash(const char* data, size_t length) {
+  return SuperFastHash(data, length);
+}
+
 // Computes a hash of a string |str|.
 // WARNING: This hash function should not be used for any cryptographic purpose.
 inline uint32_t Hash(const std::string& str) {
-  std::hash<std::string> hash_fn;
-  return hash_fn(str);
+  return Hash(str.data(), str.size());
 }
 
+// Implement hashing for pairs of at-most 32 bit integer values.
+// When size_t is 32 bits, we turn the 64-bit hash code into 32 bits by using
+// multiply-add hashing. This algorithm, as described in
+// Theorem 4.3.3 of the thesis "Über die Komplexität der Multiplikation in
+// eingeschränkten Branchingprogrammmodellen" by Woelfel, is:
+//
+//   h32(x32, y32) = (h64(x32, y32) * rand_odd64 + rand16 * 2^16) % 2^64 / 2^32
+//
+// Contact danakj@chromium.org for any questions.
+inline size_t HashInts32(uint32_t value1, uint32_t value2) {
+  uint64_t value1_64 = value1;
+  uint64_t hash64 = (value1_64 << 32) | value2;
+
+  if (sizeof(size_t) >= sizeof(uint64_t))
+    return static_cast<size_t>(hash64);
+
+  uint64_t odd_random = 481046412LL << 32 | 1025306955LL;
+  uint32_t shift_random = 10121U << 16;
+
+  hash64 = hash64 * odd_random + shift_random;
+  size_t high_bits =
+      static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+  return high_bits;
+}
+
+// Implement hashing for pairs of up-to 64-bit integer values.
+// We use the compound integer hash method to produce a 64-bit hash code, by
+// breaking the two 64-bit inputs into 4 32-bit values:
+// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
+// Then we reduce our result to 32 bits if required, similar to above.
+inline size_t HashInts64(uint64_t value1, uint64_t value2) {
+  uint32_t short_random1 = 842304669U;
+  uint32_t short_random2 = 619063811U;
+  uint32_t short_random3 = 937041849U;
+  uint32_t short_random4 = 3309708029U;
+
+  uint32_t value1a = static_cast<uint32_t>(value1 & 0xffffffff);
+  uint32_t value1b = static_cast<uint32_t>((value1 >> 32) & 0xffffffff);
+  uint32_t value2a = static_cast<uint32_t>(value2 & 0xffffffff);
+  uint32_t value2b = static_cast<uint32_t>((value2 >> 32) & 0xffffffff);
+
+  uint64_t product1 = static_cast<uint64_t>(value1a) * short_random1;
+  uint64_t product2 = static_cast<uint64_t>(value1b) * short_random2;
+  uint64_t product3 = static_cast<uint64_t>(value2a) * short_random3;
+  uint64_t product4 = static_cast<uint64_t>(value2b) * short_random4;
+
+  uint64_t hash64 = product1 + product2 + product3 + product4;
+
+  if (sizeof(size_t) >= sizeof(uint64_t))
+    return static_cast<size_t>(hash64);
+
+  uint64_t odd_random = 1578233944LL << 32 | 194370989LL;
+  uint32_t shift_random = 20591U << 16;
+
+  hash64 = hash64 * odd_random + shift_random;
+  size_t high_bits =
+      static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+  return high_bits;
+}
+
+template <typename T1, typename T2>
+inline size_t HashInts(T1 value1, T2 value2) {
+  // This condition is expected to be compile-time evaluated and optimised away
+  // in release builds.
+  if (sizeof(T1) > sizeof(uint32_t) || (sizeof(T2) > sizeof(uint32_t)))
+    return HashInts64(value1, value2);
+
+  return HashInts32(value1, value2);
+}
+
+// A templated hasher for pairs of integer types.
+template <typename T>
+struct IntPairHash;
+
+template <typename Type1, typename Type2>
+struct IntPairHash<std::pair<Type1, Type2>> {
+  size_t operator()(std::pair<Type1, Type2> value) const {
+    return HashInts(value.first, value.second);
+  }
+};
+
 }  // namespace base
 
 #endif  // BASE_HASH_H_
diff --git a/base/id_map.h b/base/id_map.h
index 15c6662..ef6b156 100644
--- a/base/id_map.h
+++ b/base/id_map.h
@@ -160,7 +160,7 @@
   template<class ReturnType>
   class Iterator {
    public:
-    Iterator(IDMap<T, OS>* map)
+    Iterator(IDMap<T, OS, K>* map)
         : map_(map),
           iter_(map_->data_.begin()) {
       Init();
@@ -226,7 +226,7 @@
       }
     }
 
-    IDMap<T, OS>* map_;
+    IDMap<T, OS, K>* map_;
     typename HashTable::const_iterator iter_;
   };
 
diff --git a/base/id_map_unittest.cc b/base/id_map_unittest.cc
index 7a07a28..a3f0808 100644
--- a/base/id_map_unittest.cc
+++ b/base/id_map_unittest.cc
@@ -365,6 +365,13 @@
   map.AddWithID(&obj1, kId1);
   EXPECT_EQ(&obj1, map.Lookup(kId1));
 
+  IDMap<TestObject, IDMapExternalPointer, int64_t>::const_iterator iter(&map);
+  ASSERT_FALSE(iter.IsAtEnd());
+  EXPECT_EQ(kId1, iter.GetCurrentKey());
+  EXPECT_EQ(&obj1, iter.GetCurrentValue());
+  iter.Advance();
+  ASSERT_TRUE(iter.IsAtEnd());
+
   map.Remove(kId1);
   EXPECT_TRUE(map.IsEmpty());
 }
diff --git a/base/ios/OWNERS b/base/ios/OWNERS
index dc0be62..06f5ff1 100644
--- a/base/ios/OWNERS
+++ b/base/ios/OWNERS
@@ -1,4 +1,3 @@
 droger@chromium.org
 qsr@chromium.org
 rohitrao@chromium.org
-stuartmorgan@chromium.org
diff --git a/base/json/json_file_value_serializer.cc b/base/json/json_file_value_serializer.cc
index 516f876..1a9b7a2 100644
--- a/base/json/json_file_value_serializer.cc
+++ b/base/json/json_file_value_serializer.cc
@@ -101,7 +101,7 @@
   }
 }
 
-scoped_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
+std::unique_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
     int* error_code,
     std::string* error_str) {
   std::string json_string;
diff --git a/base/json/json_file_value_serializer.h b/base/json/json_file_value_serializer.h
index f6b4e5f..67d2342 100644
--- a/base/json/json_file_value_serializer.h
+++ b/base/json/json_file_value_serializer.h
@@ -60,8 +60,8 @@
   // If |error_message| is non-null, it will be filled in with a formatted
   // error message including the location of the error if appropriate.
   // The caller takes ownership of the returned value.
-  scoped_ptr<base::Value> Deserialize(int* error_code,
-                                      std::string* error_message) override;
+  std::unique_ptr<base::Value> Deserialize(int* error_code,
+                                           std::string* error_message) override;
 
   // This enum is designed to safely overlap with JSONReader::JsonParseError.
   enum JsonFileError {
diff --git a/base/json/json_parser.cc b/base/json/json_parser.cc
index fbd4da4..d97eccc 100644
--- a/base/json/json_parser.cc
+++ b/base/json/json_parser.cc
@@ -5,10 +5,11 @@
 #include "base/json/json_parser.h"
 
 #include <cmath>
+#include <utility>
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
@@ -27,16 +28,19 @@
 
 const int32_t kExtendedASCIIStart = 0x80;
 
-// This and the class below are used to own the JSON input string for when
-// string tokens are stored as StringPiece instead of std::string. This
-// optimization avoids about 2/3rds of string memory copies. The constructor
-// takes ownership of the input string. The real root value is Swap()ed into
-// the new instance.
+// DictionaryHiddenRootValue and ListHiddenRootValue are used in conjunction
+// with JSONStringValue as an optimization for reducing the number of string
+// copies. When this optimization is active, the parser uses a hidden root to
+// keep the original JSON input string live and creates JSONStringValue children
+// holding StringPiece references to the input string, avoiding about 2/3rds of
+// string memory copies. The real root value is Swap()ed into the new instance.
 class DictionaryHiddenRootValue : public DictionaryValue {
  public:
-  DictionaryHiddenRootValue(std::string* json, Value* root) : json_(json) {
+  DictionaryHiddenRootValue(std::unique_ptr<std::string> json,
+                            std::unique_ptr<Value> root)
+      : json_(std::move(json)) {
     DCHECK(root->IsType(Value::TYPE_DICTIONARY));
-    DictionaryValue::Swap(static_cast<DictionaryValue*>(root));
+    DictionaryValue::Swap(static_cast<DictionaryValue*>(root.get()));
   }
 
   void Swap(DictionaryValue* other) override {
@@ -44,7 +48,7 @@
 
     // First deep copy to convert JSONStringValue to std::string and swap that
     // copy with |other|, which contains the new contents of |this|.
-    scoped_ptr<DictionaryValue> copy(DeepCopy());
+    std::unique_ptr<DictionaryValue> copy(CreateDeepCopy());
     copy->Swap(other);
 
     // Then erase the contents of the current dictionary and swap in the
@@ -58,7 +62,7 @@
   // the method below.
 
   bool RemoveWithoutPathExpansion(const std::string& key,
-                                  scoped_ptr<Value>* out) override {
+                                  std::unique_ptr<Value>* out) override {
     // If the caller won't take ownership of the removed value, just call up.
     if (!out)
       return DictionaryValue::RemoveWithoutPathExpansion(key, out);
@@ -67,26 +71,28 @@
 
     // Otherwise, remove the value while its still "owned" by this and copy it
     // to convert any JSONStringValues to std::string.
-    scoped_ptr<Value> out_owned;
+    std::unique_ptr<Value> out_owned;
     if (!DictionaryValue::RemoveWithoutPathExpansion(key, &out_owned))
       return false;
 
-    out->reset(out_owned->DeepCopy());
+    *out = out_owned->CreateDeepCopy();
 
     return true;
   }
 
  private:
-  scoped_ptr<std::string> json_;
+  std::unique_ptr<std::string> json_;
 
   DISALLOW_COPY_AND_ASSIGN(DictionaryHiddenRootValue);
 };
 
 class ListHiddenRootValue : public ListValue {
  public:
-  ListHiddenRootValue(std::string* json, Value* root) : json_(json) {
+  ListHiddenRootValue(std::unique_ptr<std::string> json,
+                      std::unique_ptr<Value> root)
+      : json_(std::move(json)) {
     DCHECK(root->IsType(Value::TYPE_LIST));
-    ListValue::Swap(static_cast<ListValue*>(root));
+    ListValue::Swap(static_cast<ListValue*>(root.get()));
   }
 
   void Swap(ListValue* other) override {
@@ -94,7 +100,7 @@
 
     // First deep copy to convert JSONStringValue to std::string and swap that
     // copy with |other|, which contains the new contents of |this|.
-    scoped_ptr<ListValue> copy(DeepCopy());
+    std::unique_ptr<ListValue> copy(CreateDeepCopy());
     copy->Swap(other);
 
     // Then erase the contents of the current list and swap in the new contents,
@@ -104,7 +110,7 @@
     ListValue::Swap(copy.get());
   }
 
-  bool Remove(size_t index, scoped_ptr<Value>* out) override {
+  bool Remove(size_t index, std::unique_ptr<Value>* out) override {
     // If the caller won't take ownership of the removed value, just call up.
     if (!out)
       return ListValue::Remove(index, out);
@@ -113,17 +119,17 @@
 
     // Otherwise, remove the value while its still "owned" by this and copy it
     // to convert any JSONStringValues to std::string.
-    scoped_ptr<Value> out_owned;
+    std::unique_ptr<Value> out_owned;
     if (!ListValue::Remove(index, &out_owned))
       return false;
 
-    out->reset(out_owned->DeepCopy());
+    *out = out_owned->CreateDeepCopy();
 
     return true;
   }
 
  private:
-  scoped_ptr<std::string> json_;
+  std::unique_ptr<std::string> json_;
 
   DISALLOW_COPY_AND_ASSIGN(ListHiddenRootValue);
 };
@@ -133,10 +139,8 @@
 // otherwise the referenced string will not be guaranteed to outlive it.
 class JSONStringValue : public Value {
  public:
-  explicit JSONStringValue(const StringPiece& piece)
-      : Value(TYPE_STRING),
-        string_piece_(piece) {
-  }
+  explicit JSONStringValue(StringPiece piece)
+      : Value(TYPE_STRING), string_piece_(piece) {}
 
   // Overridden from Value:
   bool GetAsString(std::string* out_value) const override {
@@ -188,9 +192,9 @@
 
 JSONParser::JSONParser(int options)
     : options_(options),
-      start_pos_(NULL),
-      pos_(NULL),
-      end_pos_(NULL),
+      start_pos_(nullptr),
+      pos_(nullptr),
+      end_pos_(nullptr),
       index_(0),
       stack_depth_(0),
       line_number_(0),
@@ -203,13 +207,13 @@
 JSONParser::~JSONParser() {
 }
 
-Value* JSONParser::Parse(const StringPiece& input) {
-  scoped_ptr<std::string> input_copy;
+std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
+  std::unique_ptr<std::string> input_copy;
   // If the children of a JSON root can be detached, then hidden roots cannot
   // be used, so do not bother copying the input because StringPiece will not
   // be used anywhere.
   if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
-    input_copy.reset(new std::string(input.as_string()));
+    input_copy = MakeUnique<std::string>(input.as_string());
     start_pos_ = input_copy->data();
   } else {
     start_pos_ = input.data();
@@ -235,15 +239,15 @@
   }
 
   // Parse the first and any nested tokens.
-  scoped_ptr<Value> root(ParseNextToken());
-  if (!root.get())
-    return NULL;
+  std::unique_ptr<Value> root(ParseNextToken());
+  if (!root)
+    return nullptr;
 
   // Make sure the input stream is at an end.
   if (GetNextToken() != T_END_OF_INPUT) {
     if (!CanConsume(1) || (NextChar() && GetNextToken() != T_END_OF_INPUT)) {
       ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 1);
-      return NULL;
+      return nullptr;
     }
   }
 
@@ -251,19 +255,23 @@
   // hidden root.
   if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
     if (root->IsType(Value::TYPE_DICTIONARY)) {
-      return new DictionaryHiddenRootValue(input_copy.release(), root.get());
-    } else if (root->IsType(Value::TYPE_LIST)) {
-      return new ListHiddenRootValue(input_copy.release(), root.get());
-    } else if (root->IsType(Value::TYPE_STRING)) {
+      return MakeUnique<DictionaryHiddenRootValue>(std::move(input_copy),
+                                                   std::move(root));
+    }
+    if (root->IsType(Value::TYPE_LIST)) {
+      return MakeUnique<ListHiddenRootValue>(std::move(input_copy),
+                                             std::move(root));
+    }
+    if (root->IsType(Value::TYPE_STRING)) {
       // A string type could be a JSONStringValue, but because there's no
       // corresponding HiddenRootValue, the memory will be lost. Deep copy to
       // preserve it.
-      return root->DeepCopy();
+      return root->CreateDeepCopy();
     }
   }
 
   // All other values can be returned directly.
-  return root.release();
+  return root;
 }
 
 JSONReader::JsonParseError JSONParser::error_code() const {
@@ -285,16 +293,12 @@
 
 // StringBuilder ///////////////////////////////////////////////////////////////
 
-JSONParser::StringBuilder::StringBuilder()
-    : pos_(NULL),
-      length_(0),
-      string_(NULL) {
-}
+JSONParser::StringBuilder::StringBuilder() : StringBuilder(nullptr) {}
 
 JSONParser::StringBuilder::StringBuilder(const char* pos)
     : pos_(pos),
       length_(0),
-      string_(NULL) {
+      string_(nullptr) {
 }
 
 void JSONParser::StringBuilder::Swap(StringBuilder* other) {
@@ -309,7 +313,7 @@
 
 void JSONParser::StringBuilder::Append(const char& c) {
   DCHECK_GE(c, 0);
-  DCHECK_LT(c, 128);
+  DCHECK_LT(static_cast<unsigned char>(c), 128);
 
   if (string_)
     string_->push_back(c);
@@ -483,36 +487,36 @@
       return ConsumeLiteral();
     default:
       ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
-      return NULL;
+      return nullptr;
   }
 }
 
 Value* JSONParser::ConsumeDictionary() {
   if (*pos_ != '{') {
     ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
-    return NULL;
+    return nullptr;
   }
 
   StackMarker depth_check(&stack_depth_);
   if (depth_check.IsTooDeep()) {
     ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
-    return NULL;
+    return nullptr;
   }
 
-  scoped_ptr<DictionaryValue> dict(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> dict(new DictionaryValue);
 
   NextChar();
   Token token = GetNextToken();
   while (token != T_OBJECT_END) {
     if (token != T_STRING) {
       ReportError(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, 1);
-      return NULL;
+      return nullptr;
     }
 
     // First consume the key.
     StringBuilder key;
     if (!ConsumeStringRaw(&key)) {
-      return NULL;
+      return nullptr;
     }
 
     // Read the separator.
@@ -520,7 +524,7 @@
     token = GetNextToken();
     if (token != T_OBJECT_PAIR_SEPARATOR) {
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-      return NULL;
+      return nullptr;
     }
 
     // The next token is the value. Ownership transfers to |dict|.
@@ -528,7 +532,7 @@
     Value* value = ParseNextToken();
     if (!value) {
       // ReportError from deeper level.
-      return NULL;
+      return nullptr;
     }
 
     dict->SetWithoutPathExpansion(key.AsString(), value);
@@ -540,11 +544,11 @@
       token = GetNextToken();
       if (token == T_OBJECT_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
         ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
-        return NULL;
+        return nullptr;
       }
     } else if (token != T_OBJECT_END) {
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
-      return NULL;
+      return nullptr;
     }
   }
 
@@ -554,16 +558,16 @@
 Value* JSONParser::ConsumeList() {
   if (*pos_ != '[') {
     ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
-    return NULL;
+    return nullptr;
   }
 
   StackMarker depth_check(&stack_depth_);
   if (depth_check.IsTooDeep()) {
     ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
-    return NULL;
+    return nullptr;
   }
 
-  scoped_ptr<ListValue> list(new ListValue);
+  std::unique_ptr<ListValue> list(new ListValue);
 
   NextChar();
   Token token = GetNextToken();
@@ -571,7 +575,7 @@
     Value* item = ParseToken(token);
     if (!item) {
       // ReportError from deeper level.
-      return NULL;
+      return nullptr;
     }
 
     list->Append(item);
@@ -583,11 +587,11 @@
       token = GetNextToken();
       if (token == T_ARRAY_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
         ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
-        return NULL;
+        return nullptr;
       }
     } else if (token != T_ARRAY_END) {
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-      return NULL;
+      return nullptr;
     }
   }
 
@@ -597,17 +601,16 @@
 Value* JSONParser::ConsumeString() {
   StringBuilder string;
   if (!ConsumeStringRaw(&string))
-    return NULL;
+    return nullptr;
 
   // Create the Value representation, using a hidden root, if configured
   // to do so, and if the string can be represented by StringPiece.
-  if (string.CanBeStringPiece() && !(options_ & JSON_DETACHABLE_CHILDREN)) {
+  if (string.CanBeStringPiece() && !(options_ & JSON_DETACHABLE_CHILDREN))
     return new JSONStringValue(string.AsStringPiece());
-  } else {
-    if (string.CanBeStringPiece())
-      string.Convert();
-    return new StringValue(string.AsString());
-  }
+
+  if (string.CanBeStringPiece())
+    string.Convert();
+  return new StringValue(string.AsString());
 }
 
 bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
@@ -632,11 +635,23 @@
       return false;
     }
 
-    // If this character is an escape sequence...
-    if (next_char == '\\') {
-      // The input string will be adjusted (either by combining the two
-      // characters of an encoded escape sequence, or with a UTF conversion),
-      // so using StringPiece isn't possible -- force a conversion.
+    if (next_char == '"') {
+      --index_;  // Rewind by one because of CBU8_NEXT.
+      out->Swap(&string);
+      return true;
+    }
+
+    // If this character is not an escape sequence...
+    if (next_char != '\\') {
+      if (next_char < kExtendedASCIIStart)
+        string.Append(static_cast<char>(next_char));
+      else
+        DecodeUTF8(next_char, &string);
+    } else {
+      // And if it is an escape sequence, the input string will be adjusted
+      // (either by combining the two characters of an encoded escape sequence,
+      // or with a UTF conversion), so using StringPiece isn't possible -- force
+      // a conversion.
       string.Convert();
 
       if (!CanConsume(1)) {
@@ -718,15 +733,6 @@
           ReportError(JSONReader::JSON_INVALID_ESCAPE, 0);
           return false;
       }
-    } else if (next_char == '"') {
-      --index_;  // Rewind by one because of CBU8_NEXT.
-      out->Swap(&string);
-      return true;
-    } else {
-      if (next_char < kExtendedASCIIStart)
-        string.Append(static_cast<char>(next_char));
-      else
-        DecodeUTF8(next_char, &string);
     }
   }
 
@@ -831,7 +837,7 @@
 
   if (!ReadInt(false)) {
     ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-    return NULL;
+    return nullptr;
   }
   end_index = index_;
 
@@ -839,12 +845,12 @@
   if (*pos_ == '.') {
     if (!CanConsume(1)) {
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-      return NULL;
+      return nullptr;
     }
     NextChar();
     if (!ReadInt(true)) {
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-      return NULL;
+      return nullptr;
     }
     end_index = index_;
   }
@@ -856,7 +862,7 @@
       NextChar();
     if (!ReadInt(true)) {
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-      return NULL;
+      return nullptr;
     }
     end_index = index_;
   }
@@ -876,7 +882,7 @@
       break;
     default:
       ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-      return NULL;
+      return nullptr;
   }
 
   pos_ = exit_pos;
@@ -894,7 +900,7 @@
     return new FundamentalValue(num_double);
   }
 
-  return NULL;
+  return nullptr;
 }
 
 bool JSONParser::ReadInt(bool allow_leading_zeros) {
@@ -924,7 +930,7 @@
       if (!CanConsume(kTrueLen - 1) ||
           !StringsAreEqual(pos_, kTrueLiteral, kTrueLen)) {
         ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-        return NULL;
+        return nullptr;
       }
       NextNChars(kTrueLen - 1);
       return new FundamentalValue(true);
@@ -935,7 +941,7 @@
       if (!CanConsume(kFalseLen - 1) ||
           !StringsAreEqual(pos_, kFalseLiteral, kFalseLen)) {
         ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-        return NULL;
+        return nullptr;
       }
       NextNChars(kFalseLen - 1);
       return new FundamentalValue(false);
@@ -946,14 +952,14 @@
       if (!CanConsume(kNullLen - 1) ||
           !StringsAreEqual(pos_, kNullLiteral, kNullLen)) {
         ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
-        return NULL;
+        return nullptr;
       }
       NextNChars(kNullLen - 1);
       return Value::CreateNullValue().release();
     }
     default:
       ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
-      return NULL;
+      return nullptr;
   }
 }
 
diff --git a/base/json/json_parser.h b/base/json/json_parser.h
index fc04594..7539fa9 100644
--- a/base/json/json_parser.h
+++ b/base/json/json_parser.h
@@ -8,6 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
@@ -49,8 +50,10 @@
   ~JSONParser();
 
   // Parses the input string according to the set options and returns the
-  // result as a Value owned by the caller.
-  Value* Parse(const StringPiece& input);
+  // result as a Value.
+  // Wrap this in base::FooValue::From() to check the Value is of type Foo and
+  // convert to a FooValue at the same time.
+  std::unique_ptr<Value> Parse(StringPiece input);
 
   // Returns the error code.
   JSONReader::JsonParseError error_code() const;
@@ -133,7 +136,7 @@
     size_t length_;
 
     // The copied string representation. NULL until Convert() is called.
-    // Strong. scoped_ptr<T> has too much of an overhead here.
+    // Strong. std::unique_ptr<T> has too much of an overhead here.
     std::string* string_;
   };
 
@@ -218,7 +221,7 @@
                                         const std::string& description);
 
   // base::JSONParserOptions that control parsing.
-  int options_;
+  const int options_;
 
   // Pointer to the start of the input data.
   const char* start_pos_;
diff --git a/base/json/json_parser_unittest.cc b/base/json/json_parser_unittest.cc
index da86b33..30255ca 100644
--- a/base/json/json_parser_unittest.cc
+++ b/base/json/json_parser_unittest.cc
@@ -6,8 +6,9 @@
 
 #include <stddef.h>
 
+#include <memory>
+
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -34,7 +35,7 @@
 
 TEST_F(JSONParserTest, NextChar) {
   std::string input("Hello world");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
 
   EXPECT_EQ('H', *parser->pos_);
   for (size_t i = 1; i < input.length(); ++i) {
@@ -45,8 +46,8 @@
 
 TEST_F(JSONParserTest, ConsumeString) {
   std::string input("\"test\",|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeString());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeString());
   EXPECT_EQ('"', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -59,8 +60,8 @@
 
 TEST_F(JSONParserTest, ConsumeList) {
   std::string input("[true, false],|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeList());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeList());
   EXPECT_EQ(']', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -73,8 +74,8 @@
 
 TEST_F(JSONParserTest, ConsumeDictionary) {
   std::string input("{\"abc\":\"def\"},|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeDictionary());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeDictionary());
   EXPECT_EQ('}', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -90,8 +91,8 @@
 TEST_F(JSONParserTest, ConsumeLiterals) {
   // Literal |true|.
   std::string input("true,|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeLiteral());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeLiteral());
   EXPECT_EQ('e', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -128,8 +129,8 @@
 TEST_F(JSONParserTest, ConsumeNumbers) {
   // Integer.
   std::string input("1234,|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeNumber());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeNumber());
   EXPECT_EQ('4', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -205,7 +206,7 @@
   // Error strings should not be modified in case of success.
   std::string error_message;
   int error_code = 0;
-  scoped_ptr<Value> root = JSONReader::ReadAndReturnError(
+  std::unique_ptr<Value> root = JSONReader::ReadAndReturnError(
       "[42]", JSON_PARSE_RFC, &error_code, &error_message);
   EXPECT_TRUE(error_message.empty());
   EXPECT_EQ(0, error_code);
@@ -309,7 +310,7 @@
       "[\"😇\",[],[],[],{\"google:suggesttype\":[]}]";
   std::string error_message;
   int error_code = 0;
-  scoped_ptr<Value> root = JSONReader::ReadAndReturnError(
+  std::unique_ptr<Value> root = JSONReader::ReadAndReturnError(
       kUtf8Data, JSON_PARSE_RFC, &error_code, &error_message);
   EXPECT_TRUE(root.get()) << error_message;
 }
diff --git a/base/json/json_reader.cc b/base/json/json_reader.cc
index 3ab5f75..4ff7496 100644
--- a/base/json/json_reader.cc
+++ b/base/json/json_reader.cc
@@ -43,27 +43,28 @@
 }
 
 // static
-scoped_ptr<Value> JSONReader::Read(const StringPiece& json) {
+std::unique_ptr<Value> JSONReader::Read(StringPiece json) {
   internal::JSONParser parser(JSON_PARSE_RFC);
-  return make_scoped_ptr(parser.Parse(json));
+  return parser.Parse(json);
 }
 
 // static
-scoped_ptr<Value> JSONReader::Read(const StringPiece& json, int options) {
+std::unique_ptr<Value> JSONReader::Read(StringPiece json, int options) {
   internal::JSONParser parser(options);
-  return make_scoped_ptr(parser.Parse(json));
+  return parser.Parse(json);
 }
 
 
 // static
-scoped_ptr<Value> JSONReader::ReadAndReturnError(const StringPiece& json,
-                                                 int options,
-                                                 int* error_code_out,
-                                                 std::string* error_msg_out,
-                                                 int* error_line_out,
-                                                 int* error_column_out) {
+std::unique_ptr<Value> JSONReader::ReadAndReturnError(
+    const StringPiece& json,
+    int options,
+    int* error_code_out,
+    std::string* error_msg_out,
+    int* error_line_out,
+    int* error_column_out) {
   internal::JSONParser parser(options);
-  scoped_ptr<Value> root(parser.Parse(json));
+  std::unique_ptr<Value> root(parser.Parse(json));
   if (!root) {
     if (error_code_out)
       *error_code_out = parser.error_code();
@@ -105,8 +106,8 @@
   }
 }
 
-scoped_ptr<Value> JSONReader::ReadToValue(const std::string& json) {
-  return make_scoped_ptr(parser_->Parse(json));
+std::unique_ptr<Value> JSONReader::ReadToValue(StringPiece json) {
+  return parser_->Parse(json);
 }
 
 JSONReader::JsonParseError JSONReader::error_code() const {
diff --git a/base/json/json_reader.h b/base/json/json_reader.h
index c6bcb52..a954821 100644
--- a/base/json/json_reader.h
+++ b/base/json/json_reader.h
@@ -28,10 +28,10 @@
 #ifndef BASE_JSON_JSON_READER_H_
 #define BASE_JSON_JSON_READER_H_
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 
 namespace base {
@@ -91,32 +91,33 @@
 
   ~JSONReader();
 
-  // Reads and parses |json|, returning a Value. The caller owns the returned
-  // instance. If |json| is not a properly formed JSON string, returns NULL.
-  static scoped_ptr<Value> Read(const StringPiece& json);
+  // Reads and parses |json|, returning a Value.
+  // If |json| is not a properly formed JSON string, returns nullptr.
+  // Wrap this in base::FooValue::From() to check the Value is of type Foo and
+  // convert to a FooValue at the same time.
+  static std::unique_ptr<Value> Read(StringPiece json);
 
-  // Reads and parses |json|, returning a Value owned by the caller. The
-  // parser respects the given |options|. If the input is not properly formed,
-  // returns NULL.
-  static scoped_ptr<Value> Read(const StringPiece& json, int options);
+  // Same as Read() above, but the parser respects the given |options|.
+  static std::unique_ptr<Value> Read(StringPiece json, int options);
 
   // Reads and parses |json| like Read(). |error_code_out| and |error_msg_out|
-  // are optional. If specified and NULL is returned, they will be populated
+  // are optional. If specified and nullptr is returned, they will be populated
   // an error code and a formatted error message (including error location if
   // appropriate). Otherwise, they will be unmodified.
-  static scoped_ptr<Value> ReadAndReturnError(const StringPiece& json,
-                                              int options,  // JSONParserOptions
-                                              int* error_code_out,
-                                              std::string* error_msg_out,
-                                              int* error_line_out = nullptr,
-                                              int* error_column_out = nullptr);
+  static std::unique_ptr<Value> ReadAndReturnError(
+      const StringPiece& json,
+      int options,  // JSONParserOptions
+      int* error_code_out,
+      std::string* error_msg_out,
+      int* error_line_out = nullptr,
+      int* error_column_out = nullptr);
 
   // Converts a JSON parse error code into a human readable message.
   // Returns an empty string if error_code is JSON_NO_ERROR.
   static std::string ErrorCodeToString(JsonParseError error_code);
 
-  // Parses an input string into a Value that is owned by the caller.
-  scoped_ptr<Value> ReadToValue(const std::string& json);
+  // Non-static version of Read() above.
+  std::unique_ptr<Value> ReadToValue(StringPiece json);
 
   // Returns the error code if the last call to ReadToValue() failed.
   // Returns JSON_NO_ERROR otherwise.
@@ -127,7 +128,7 @@
   std::string GetErrorMessage() const;
 
  private:
-  scoped_ptr<internal::JSONParser> parser_;
+  std::unique_ptr<internal::JSONParser> parser_;
 };
 
 }  // namespace base
diff --git a/base/json/json_reader_unittest.cc b/base/json/json_reader_unittest.cc
index 45c04d8..84732c4 100644
--- a/base/json/json_reader_unittest.cc
+++ b/base/json/json_reader_unittest.cc
@@ -6,6 +6,8 @@
 
 #include <stddef.h>
 
+#include <memory>
+
 #if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
 #include "base/base_paths.h"
 #include "base/path_service.h"
@@ -14,7 +16,6 @@
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/values.h"
@@ -24,525 +25,549 @@
 namespace base {
 
 TEST(JSONReaderTest, Reading) {
-  // some whitespace checking
-  scoped_ptr<Value> root = JSONReader().ReadToValue("   null   ");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
-
-  // Invalid JSON string
-  root = JSONReader().ReadToValue("nu");
-  EXPECT_FALSE(root.get());
-
-  // Simple bool
-  root = JSONReader().ReadToValue("true  ");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
-
-  // Embedded comment
-  root = JSONReader().ReadToValue("/* comment */null");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
-  root = JSONReader().ReadToValue("40 /* comment */");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
-  root = JSONReader().ReadToValue("true // comment");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
-  root = JSONReader().ReadToValue("/* comment */\"sample string\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
-  std::string value;
-  EXPECT_TRUE(root->GetAsString(&value));
-  EXPECT_EQ("sample string", value);
-  root = JSONReader().ReadToValue("[1, /* comment, 2 ] */ \n 3]");
-  ASSERT_TRUE(root.get());
-  ListValue* list = static_cast<ListValue*>(root.get());
-  EXPECT_EQ(2u, list->GetSize());
-  int int_val = 0;
-  EXPECT_TRUE(list->GetInteger(0, &int_val));
-  EXPECT_EQ(1, int_val);
-  EXPECT_TRUE(list->GetInteger(1, &int_val));
-  EXPECT_EQ(3, int_val);
-  root = JSONReader().ReadToValue("[1, /*a*/2, 3]");
-  ASSERT_TRUE(root.get());
-  list = static_cast<ListValue*>(root.get());
-  EXPECT_EQ(3u, list->GetSize());
-  root = JSONReader().ReadToValue("/* comment **/42");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
-  EXPECT_TRUE(root->GetAsInteger(&int_val));
-  EXPECT_EQ(42, int_val);
-  root = JSONReader().ReadToValue(
-      "/* comment **/\n"
-      "// */ 43\n"
-      "44");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
-  EXPECT_TRUE(root->GetAsInteger(&int_val));
-  EXPECT_EQ(44, int_val);
-
-  // Test number formats
-  root = JSONReader().ReadToValue("43");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
-  EXPECT_TRUE(root->GetAsInteger(&int_val));
-  EXPECT_EQ(43, int_val);
-
-  // According to RFC4627, oct, hex, and leading zeros are invalid JSON.
-  root = JSONReader().ReadToValue("043");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("0x43");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("00");
-  EXPECT_FALSE(root.get());
-
-  // Test 0 (which needs to be special cased because of the leading zero
-  // clause).
-  root = JSONReader().ReadToValue("0");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
-  int_val = 1;
-  EXPECT_TRUE(root->GetAsInteger(&int_val));
-  EXPECT_EQ(0, int_val);
-
-  // Numbers that overflow ints should succeed, being internally promoted to
-  // storage as doubles
-  root = JSONReader().ReadToValue("2147483648");
-  ASSERT_TRUE(root.get());
-  double double_val;
-  EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
-  double_val = 0.0;
-  EXPECT_TRUE(root->GetAsDouble(&double_val));
-  EXPECT_DOUBLE_EQ(2147483648.0, double_val);
-  root = JSONReader().ReadToValue("-2147483649");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
-  double_val = 0.0;
-  EXPECT_TRUE(root->GetAsDouble(&double_val));
-  EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
-
-  // Parse a double
-  root = JSONReader().ReadToValue("43.1");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
-  double_val = 0.0;
-  EXPECT_TRUE(root->GetAsDouble(&double_val));
-  EXPECT_DOUBLE_EQ(43.1, double_val);
-
-  root = JSONReader().ReadToValue("4.3e-1");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
-  double_val = 0.0;
-  EXPECT_TRUE(root->GetAsDouble(&double_val));
-  EXPECT_DOUBLE_EQ(.43, double_val);
-
-  root = JSONReader().ReadToValue("2.1e0");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
-  double_val = 0.0;
-  EXPECT_TRUE(root->GetAsDouble(&double_val));
-  EXPECT_DOUBLE_EQ(2.1, double_val);
-
-  root = JSONReader().ReadToValue("2.1e+0001");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
-  double_val = 0.0;
-  EXPECT_TRUE(root->GetAsDouble(&double_val));
-  EXPECT_DOUBLE_EQ(21.0, double_val);
-
-  root = JSONReader().ReadToValue("0.01");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
-  double_val = 0.0;
-  EXPECT_TRUE(root->GetAsDouble(&double_val));
-  EXPECT_DOUBLE_EQ(0.01, double_val);
-
-  root = JSONReader().ReadToValue("1.00");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
-  double_val = 0.0;
-  EXPECT_TRUE(root->GetAsDouble(&double_val));
-  EXPECT_DOUBLE_EQ(1.0, double_val);
-
-  // Fractional parts must have a digit before and after the decimal point.
-  root = JSONReader().ReadToValue("1.");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue(".1");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("1.e10");
-  EXPECT_FALSE(root.get());
-
-  // Exponent must have a digit following the 'e'.
-  root = JSONReader().ReadToValue("1e");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("1E");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("1e1.");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("1e1.0");
-  EXPECT_FALSE(root.get());
-
-  // INF/-INF/NaN are not valid
-  root = JSONReader().ReadToValue("1e1000");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("-1e1000");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("NaN");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("nan");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("inf");
-  EXPECT_FALSE(root.get());
-
-  // Invalid number formats
-  root = JSONReader().ReadToValue("4.3.1");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("4e3.1");
-  EXPECT_FALSE(root.get());
-
-  // Test string parser
-  root = JSONReader().ReadToValue("\"hello world\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
-  std::string str_val;
-  EXPECT_TRUE(root->GetAsString(&str_val));
-  EXPECT_EQ("hello world", str_val);
-
-  // Empty string
-  root = JSONReader().ReadToValue("\"\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
-  str_val.clear();
-  EXPECT_TRUE(root->GetAsString(&str_val));
-  EXPECT_EQ("", str_val);
-
-  // Test basic string escapes
-  root = JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
-  str_val.clear();
-  EXPECT_TRUE(root->GetAsString(&str_val));
-  EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
-
-  // Test hex and unicode escapes including the null character.
-  root = JSONReader().ReadToValue("\"\\x41\\x00\\u1234\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
-  str_val.clear();
-  EXPECT_TRUE(root->GetAsString(&str_val));
-  EXPECT_EQ(std::wstring(L"A\0\x1234", 3), UTF8ToWide(str_val));
-
-  // Test invalid strings
-  root = JSONReader().ReadToValue("\"no closing quote");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("\"\\z invalid escape char\"");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("\"\\xAQ invalid hex code\"");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("not enough hex chars\\x1\"");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("\"not enough escape chars\\u123\"");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("\"extra backslash at end of input\\\"");
-  EXPECT_FALSE(root.get());
-
-  // Basic array
-  root = JSONReader::Read("[true, false, null]");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
-  list = static_cast<ListValue*>(root.get());
-  EXPECT_EQ(3U, list->GetSize());
-
-  // Test with trailing comma.  Should be parsed the same as above.
-  scoped_ptr<Value> root2 =
-      JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_TRUE(root->Equals(root2.get()));
-
-  // Empty array
-  root = JSONReader::Read("[]");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
-  list = static_cast<ListValue*>(root.get());
-  EXPECT_EQ(0U, list->GetSize());
-
-  // Nested arrays
-  root = JSONReader::Read("[[true], [], [false, [], [null]], null]");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
-  list = static_cast<ListValue*>(root.get());
-  EXPECT_EQ(4U, list->GetSize());
-
-  // Lots of trailing commas.
-  root2 = JSONReader::Read("[[true], [], [false, [], [null, ]  , ], null,]",
-                           JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_TRUE(root->Equals(root2.get()));
-
-  // Invalid, missing close brace.
-  root = JSONReader::Read("[[true], [], [false, [], [null]], null");
-  EXPECT_FALSE(root.get());
-
-  // Invalid, too many commas
-  root = JSONReader::Read("[true,, null]");
-  EXPECT_FALSE(root.get());
-  root = JSONReader::Read("[true,, null]", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-
-  // Invalid, no commas
-  root = JSONReader::Read("[true null]");
-  EXPECT_FALSE(root.get());
-
-  // Invalid, trailing comma
-  root = JSONReader::Read("[true,]");
-  EXPECT_FALSE(root.get());
-
-  // Valid if we set |allow_trailing_comma| to true.
-  root = JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS);
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
-  list = static_cast<ListValue*>(root.get());
-  EXPECT_EQ(1U, list->GetSize());
-  Value* tmp_value = NULL;
-  ASSERT_TRUE(list->Get(0, &tmp_value));
-  EXPECT_TRUE(tmp_value->IsType(Value::TYPE_BOOLEAN));
-  bool bool_value = false;
-  EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
-  EXPECT_TRUE(bool_value);
-
-  // Don't allow empty elements, even if |allow_trailing_comma| is
-  // true.
-  root = JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-  root = JSONReader::Read("[true,,]", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-  root = JSONReader::Read("[,true,]", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-  root = JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-
-  // Test objects
-  root = JSONReader::Read("{}");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
-
-  root = JSONReader::Read(
-      "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\" }");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
-  DictionaryValue* dict_val = static_cast<DictionaryValue*>(root.get());
-  double_val = 0.0;
-  EXPECT_TRUE(dict_val->GetDouble("number", &double_val));
-  EXPECT_DOUBLE_EQ(9.87654321, double_val);
-  Value* null_val = NULL;
-  ASSERT_TRUE(dict_val->Get("null", &null_val));
-  EXPECT_TRUE(null_val->IsType(Value::TYPE_NULL));
-  str_val.clear();
-  EXPECT_TRUE(dict_val->GetString("S", &str_val));
-  EXPECT_EQ("str", str_val);
-
-  root2 = JSONReader::Read(
-      "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", }",
-      JSON_ALLOW_TRAILING_COMMAS);
-  ASSERT_TRUE(root2.get());
-  EXPECT_TRUE(root->Equals(root2.get()));
-
-  // Test newline equivalence.
-  root2 = JSONReader::Read(
-      "{\n"
-      "  \"number\":9.87654321,\n"
-      "  \"null\":null,\n"
-      "  \"\\x53\":\"str\",\n"
-      "}\n",
-      JSON_ALLOW_TRAILING_COMMAS);
-  ASSERT_TRUE(root2.get());
-  EXPECT_TRUE(root->Equals(root2.get()));
-
-  root2 = JSONReader::Read(
-      "{\r\n"
-      "  \"number\":9.87654321,\r\n"
-      "  \"null\":null,\r\n"
-      "  \"\\x53\":\"str\",\r\n"
-      "}\r\n",
-      JSON_ALLOW_TRAILING_COMMAS);
-  ASSERT_TRUE(root2.get());
-  EXPECT_TRUE(root->Equals(root2.get()));
-
-  // Test nesting
-  root = JSONReader::Read(
-      "{\"inner\":{\"array\":[true]},\"false\":false,\"d\":{}}");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
-  dict_val = static_cast<DictionaryValue*>(root.get());
-  DictionaryValue* inner_dict = NULL;
-  ASSERT_TRUE(dict_val->GetDictionary("inner", &inner_dict));
-  ListValue* inner_array = NULL;
-  ASSERT_TRUE(inner_dict->GetList("array", &inner_array));
-  EXPECT_EQ(1U, inner_array->GetSize());
-  bool_value = true;
-  EXPECT_TRUE(dict_val->GetBoolean("false", &bool_value));
-  EXPECT_FALSE(bool_value);
-  inner_dict = NULL;
-  EXPECT_TRUE(dict_val->GetDictionary("d", &inner_dict));
-
-  root2 = JSONReader::Read(
-      "{\"inner\": {\"array\":[true] , },\"false\":false,\"d\":{},}",
-      JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_TRUE(root->Equals(root2.get()));
-
-  // Test keys with periods
-  root = JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
-  dict_val = static_cast<DictionaryValue*>(root.get());
-  int integer_value = 0;
-  EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
-  EXPECT_EQ(3, integer_value);
-  EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("c", &integer_value));
-  EXPECT_EQ(2, integer_value);
-  inner_dict = NULL;
-  ASSERT_TRUE(dict_val->GetDictionaryWithoutPathExpansion("d.e.f",
-                                                          &inner_dict));
-  EXPECT_EQ(1U, inner_dict->size());
-  EXPECT_TRUE(inner_dict->GetIntegerWithoutPathExpansion("g.h.i.j",
-                                                         &integer_value));
-  EXPECT_EQ(1, integer_value);
-
-  root = JSONReader::Read("{\"a\":{\"b\":2},\"a.b\":1}");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
-  dict_val = static_cast<DictionaryValue*>(root.get());
-  EXPECT_TRUE(dict_val->GetInteger("a.b", &integer_value));
-  EXPECT_EQ(2, integer_value);
-  EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
-  EXPECT_EQ(1, integer_value);
-
-  // Invalid, no closing brace
-  root = JSONReader::Read("{\"a\": true");
-  EXPECT_FALSE(root.get());
-
-  // Invalid, keys must be quoted
-  root = JSONReader::Read("{foo:true}");
-  EXPECT_FALSE(root.get());
-
-  // Invalid, trailing comma
-  root = JSONReader::Read("{\"a\":true,}");
-  EXPECT_FALSE(root.get());
-
-  // Invalid, too many commas
-  root = JSONReader::Read("{\"a\":true,,\"b\":false}");
-  EXPECT_FALSE(root.get());
-  root =
-      JSONReader::Read("{\"a\":true,,\"b\":false}", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-
-  // Invalid, no separator
-  root = JSONReader::Read("{\"a\" \"b\"}");
-  EXPECT_FALSE(root.get());
-
-  // Invalid, lone comma.
-  root = JSONReader::Read("{,}");
-  EXPECT_FALSE(root.get());
-  root = JSONReader::Read("{,}", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-  root = JSONReader::Read("{\"a\":true,,}", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-  root = JSONReader::Read("{,\"a\":true}", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-  root =
-      JSONReader::Read("{\"a\":true,,\"b\":false}", JSON_ALLOW_TRAILING_COMMAS);
-  EXPECT_FALSE(root.get());
-
-  // Test stack overflow
-  std::string evil(1000000, '[');
-  evil.append(std::string(1000000, ']'));
-  root = JSONReader::Read(evil);
-  EXPECT_FALSE(root.get());
-
-  // A few thousand adjacent lists is fine.
-  std::string not_evil("[");
-  not_evil.reserve(15010);
-  for (int i = 0; i < 5000; ++i) {
-    not_evil.append("[],");
-  }
-  not_evil.append("[]]");
-  root = JSONReader::Read(not_evil);
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_LIST));
-  list = static_cast<ListValue*>(root.get());
-  EXPECT_EQ(5001U, list->GetSize());
-
-  // Test utf8 encoded input
-  root = JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
-  str_val.clear();
-  EXPECT_TRUE(root->GetAsString(&str_val));
-  EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
-
-  root = JSONReader().ReadToValue(
-      "{\"path\": \"/tmp/\xc3\xa0\xc3\xa8\xc3\xb2.png\"}");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
-  EXPECT_TRUE(root->GetAsDictionary(&dict_val));
-  EXPECT_TRUE(dict_val->GetString("path", &str_val));
-  EXPECT_EQ("/tmp/\xC3\xA0\xC3\xA8\xC3\xB2.png", str_val);
-
-  // Test invalid utf8 encoded input
-  root = JSONReader().ReadToValue("\"345\xb0\xa1\xb0\xa2\"");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("\"123\xc0\x81\"");
-  EXPECT_FALSE(root.get());
-  root = JSONReader().ReadToValue("\"abc\xc0\xae\"");
-  EXPECT_FALSE(root.get());
-
-  // Test utf16 encoded strings.
-  root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
-  str_val.clear();
-  EXPECT_TRUE(root->GetAsString(&str_val));
-  EXPECT_EQ("\xe2\x82\xac""3,14", str_val);
-
-  root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
-  str_val.clear();
-  EXPECT_TRUE(root->GetAsString(&str_val));
-  EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
-
-  // Test invalid utf16 strings.
-  const char* const cases[] = {
-    "\"\\u123\"",  // Invalid scalar.
-    "\"\\ud83d\"",  // Invalid scalar.
-    "\"\\u$%@!\"",  // Invalid scalar.
-    "\"\\uzz89\"",  // Invalid scalar.
-    "\"\\ud83d\\udca\"",  // Invalid lower surrogate.
-    "\"\\ud83d\\ud83d\"",  // Invalid lower surrogate.
-    "\"\\ud83foo\"",  // No lower surrogate.
-    "\"\\ud83\\foo\""  // No lower surrogate.
-  };
-  for (size_t i = 0; i < arraysize(cases); ++i) {
-    root = JSONReader().ReadToValue(cases[i]);
-    EXPECT_FALSE(root.get()) << cases[i];
+  {
+    // some whitespace checking
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("   null   ");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
   }
 
-  // Test literal root objects.
-  root = JSONReader::Read("null");
-  EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+  {
+    // Invalid JSON string
+    EXPECT_FALSE(JSONReader().ReadToValue("nu"));
+  }
 
-  root = JSONReader::Read("true");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->GetAsBoolean(&bool_value));
-  EXPECT_TRUE(bool_value);
+  {
+    // Simple bool
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("true  ");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+  }
 
-  root = JSONReader::Read("10");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->GetAsInteger(&integer_value));
-  EXPECT_EQ(10, integer_value);
+  {
+    // Embedded comment
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+    root = JSONReader().ReadToValue("40 /* comment */");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    root = JSONReader().ReadToValue("true // comment");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+    root = JSONReader().ReadToValue("/* comment */\"sample string\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string value;
+    EXPECT_TRUE(root->GetAsString(&value));
+    EXPECT_EQ("sample string", value);
+    std::unique_ptr<ListValue> list = ListValue::From(
+        JSONReader().ReadToValue("[1, /* comment, 2 ] */ \n 3]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(2u, list->GetSize());
+    int int_val = 0;
+    EXPECT_TRUE(list->GetInteger(0, &int_val));
+    EXPECT_EQ(1, int_val);
+    EXPECT_TRUE(list->GetInteger(1, &int_val));
+    EXPECT_EQ(3, int_val);
+    list = ListValue::From(JSONReader().ReadToValue("[1, /*a*/2, 3]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(3u, list->GetSize());
+    root = JSONReader().ReadToValue("/* comment **/42");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    EXPECT_TRUE(root->GetAsInteger(&int_val));
+    EXPECT_EQ(42, int_val);
+    root = JSONReader().ReadToValue(
+        "/* comment **/\n"
+        "// */ 43\n"
+        "44");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    EXPECT_TRUE(root->GetAsInteger(&int_val));
+    EXPECT_EQ(44, int_val);
+  }
 
-  root = JSONReader::Read("\"root\"");
-  ASSERT_TRUE(root.get());
-  EXPECT_TRUE(root->GetAsString(&str_val));
-  EXPECT_EQ("root", str_val);
+  {
+    // Test number formats
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    int int_val = 0;
+    EXPECT_TRUE(root->GetAsInteger(&int_val));
+    EXPECT_EQ(43, int_val);
+  }
+
+  {
+    // According to RFC4627, oct, hex, and leading zeros are invalid JSON.
+    EXPECT_FALSE(JSONReader().ReadToValue("043"));
+    EXPECT_FALSE(JSONReader().ReadToValue("0x43"));
+    EXPECT_FALSE(JSONReader().ReadToValue("00"));
+  }
+
+  {
+    // Test 0 (which needs to be special cased because of the leading zero
+    // clause).
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+    int int_val = 1;
+    EXPECT_TRUE(root->GetAsInteger(&int_val));
+    EXPECT_EQ(0, int_val);
+  }
+
+  {
+    // Numbers that overflow ints should succeed, being internally promoted to
+    // storage as doubles
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
+    ASSERT_TRUE(root);
+    double double_val;
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(2147483648.0, double_val);
+    root = JSONReader().ReadToValue("-2147483649");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
+  }
+
+  {
+    // Parse a double
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(43.1, double_val);
+
+    root = JSONReader().ReadToValue("4.3e-1");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(.43, double_val);
+
+    root = JSONReader().ReadToValue("2.1e0");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(2.1, double_val);
+
+    root = JSONReader().ReadToValue("2.1e+0001");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(21.0, double_val);
+
+    root = JSONReader().ReadToValue("0.01");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(0.01, double_val);
+
+    root = JSONReader().ReadToValue("1.00");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+    double_val = 0.0;
+    EXPECT_TRUE(root->GetAsDouble(&double_val));
+    EXPECT_DOUBLE_EQ(1.0, double_val);
+  }
+
+  {
+    // Fractional parts must have a digit before and after the decimal point.
+    EXPECT_FALSE(JSONReader().ReadToValue("1."));
+    EXPECT_FALSE(JSONReader().ReadToValue(".1"));
+    EXPECT_FALSE(JSONReader().ReadToValue("1.e10"));
+  }
+
+  {
+    // Exponent must have a digit following the 'e'.
+    EXPECT_FALSE(JSONReader().ReadToValue("1e"));
+    EXPECT_FALSE(JSONReader().ReadToValue("1E"));
+    EXPECT_FALSE(JSONReader().ReadToValue("1e1."));
+    EXPECT_FALSE(JSONReader().ReadToValue("1e1.0"));
+  }
+
+  {
+    // INF/-INF/NaN are not valid
+    EXPECT_FALSE(JSONReader().ReadToValue("1e1000"));
+    EXPECT_FALSE(JSONReader().ReadToValue("-1e1000"));
+    EXPECT_FALSE(JSONReader().ReadToValue("NaN"));
+    EXPECT_FALSE(JSONReader().ReadToValue("nan"));
+    EXPECT_FALSE(JSONReader().ReadToValue("inf"));
+  }
+
+  {
+    // Invalid number formats
+    EXPECT_FALSE(JSONReader().ReadToValue("4.3.1"));
+    EXPECT_FALSE(JSONReader().ReadToValue("4e3.1"));
+  }
+
+  {
+    // Test string parser
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ("hello world", str_val);
+  }
+
+  {
+    // Empty string
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ("", str_val);
+  }
+
+  {
+    // Test basic string escapes
+    std::unique_ptr<Value> root =
+        JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
+  }
+
+  {
+    // Test hex and unicode escapes including the null character.
+    std::unique_ptr<Value> root =
+        JSONReader().ReadToValue("\"\\x41\\x00\\u1234\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ(std::wstring(L"A\0\x1234", 3), UTF8ToWide(str_val));
+  }
+
+  {
+    // Test invalid strings
+    EXPECT_FALSE(JSONReader().ReadToValue("\"no closing quote"));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"\\z invalid escape char\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"\\xAQ invalid hex code\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("not enough hex chars\\x1\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"not enough escape chars\\u123\""));
+    EXPECT_FALSE(
+        JSONReader().ReadToValue("\"extra backslash at end of input\\\""));
+  }
+
+  {
+    // Basic array
+    std::unique_ptr<ListValue> list =
+        ListValue::From(JSONReader::Read("[true, false, null]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(3U, list->GetSize());
+
+    // Test with trailing comma.  Should be parsed the same as above.
+    std::unique_ptr<Value> root2 =
+        JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
+    EXPECT_TRUE(list->Equals(root2.get()));
+  }
+
+  {
+    // Empty array
+    std::unique_ptr<ListValue> list = ListValue::From(JSONReader::Read("[]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(0U, list->GetSize());
+  }
+
+  {
+    // Nested arrays
+    std::unique_ptr<ListValue> list = ListValue::From(
+        JSONReader::Read("[[true], [], [false, [], [null]], null]"));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(4U, list->GetSize());
+
+    // Lots of trailing commas.
+    std::unique_ptr<Value> root2 =
+        JSONReader::Read("[[true], [], [false, [], [null, ]  , ], null,]",
+                         JSON_ALLOW_TRAILING_COMMAS);
+    EXPECT_TRUE(list->Equals(root2.get()));
+  }
+
+  {
+    // Invalid, missing close brace.
+    EXPECT_FALSE(JSONReader::Read("[[true], [], [false, [], [null]], null"));
+
+    // Invalid, too many commas
+    EXPECT_FALSE(JSONReader::Read("[true,, null]"));
+    EXPECT_FALSE(JSONReader::Read("[true,, null]", JSON_ALLOW_TRAILING_COMMAS));
+
+    // Invalid, no commas
+    EXPECT_FALSE(JSONReader::Read("[true null]"));
+
+    // Invalid, trailing comma
+    EXPECT_FALSE(JSONReader::Read("[true,]"));
+  }
+
+  {
+    // Valid if we set |allow_trailing_comma| to true.
+    std::unique_ptr<ListValue> list = ListValue::From(
+        JSONReader::Read("[true,]", JSON_ALLOW_TRAILING_COMMAS));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(1U, list->GetSize());
+    Value* tmp_value = nullptr;
+    ASSERT_TRUE(list->Get(0, &tmp_value));
+    EXPECT_TRUE(tmp_value->IsType(Value::TYPE_BOOLEAN));
+    bool bool_value = false;
+    EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
+    EXPECT_TRUE(bool_value);
+  }
+
+  {
+    // Don't allow empty elements, even if |allow_trailing_comma| is
+    // true.
+    EXPECT_FALSE(JSONReader::Read("[,]", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("[true,,]", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("[,true,]", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("[true,,false]", JSON_ALLOW_TRAILING_COMMAS));
+  }
+
+  {
+    // Test objects
+    std::unique_ptr<DictionaryValue> dict_val =
+        DictionaryValue::From(JSONReader::Read("{}"));
+    ASSERT_TRUE(dict_val);
+
+    dict_val = DictionaryValue::From(JSONReader::Read(
+        "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\" }"));
+    ASSERT_TRUE(dict_val);
+    double double_val = 0.0;
+    EXPECT_TRUE(dict_val->GetDouble("number", &double_val));
+    EXPECT_DOUBLE_EQ(9.87654321, double_val);
+    Value* null_val = nullptr;
+    ASSERT_TRUE(dict_val->Get("null", &null_val));
+    EXPECT_TRUE(null_val->IsType(Value::TYPE_NULL));
+    std::string str_val;
+    EXPECT_TRUE(dict_val->GetString("S", &str_val));
+    EXPECT_EQ("str", str_val);
+
+    std::unique_ptr<Value> root2 = JSONReader::Read(
+        "{\"number\":9.87654321, \"null\":null , \"\\x53\" : \"str\", }",
+        JSON_ALLOW_TRAILING_COMMAS);
+    ASSERT_TRUE(root2);
+    EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+    // Test newline equivalence.
+    root2 = JSONReader::Read(
+        "{\n"
+        "  \"number\":9.87654321,\n"
+        "  \"null\":null,\n"
+        "  \"\\x53\":\"str\",\n"
+        "}\n",
+        JSON_ALLOW_TRAILING_COMMAS);
+    ASSERT_TRUE(root2);
+    EXPECT_TRUE(dict_val->Equals(root2.get()));
+
+    root2 = JSONReader::Read(
+        "{\r\n"
+        "  \"number\":9.87654321,\r\n"
+        "  \"null\":null,\r\n"
+        "  \"\\x53\":\"str\",\r\n"
+        "}\r\n",
+        JSON_ALLOW_TRAILING_COMMAS);
+    ASSERT_TRUE(root2);
+    EXPECT_TRUE(dict_val->Equals(root2.get()));
+  }
+
+  {
+    // Test nesting
+    std::unique_ptr<DictionaryValue> dict_val =
+        DictionaryValue::From(JSONReader::Read(
+            "{\"inner\":{\"array\":[true]},\"false\":false,\"d\":{}}"));
+    ASSERT_TRUE(dict_val);
+    DictionaryValue* inner_dict = nullptr;
+    ASSERT_TRUE(dict_val->GetDictionary("inner", &inner_dict));
+    ListValue* inner_array = nullptr;
+    ASSERT_TRUE(inner_dict->GetList("array", &inner_array));
+    EXPECT_EQ(1U, inner_array->GetSize());
+    bool bool_value = true;
+    EXPECT_TRUE(dict_val->GetBoolean("false", &bool_value));
+    EXPECT_FALSE(bool_value);
+    inner_dict = nullptr;
+    EXPECT_TRUE(dict_val->GetDictionary("d", &inner_dict));
+
+    std::unique_ptr<Value> root2 = JSONReader::Read(
+        "{\"inner\": {\"array\":[true] , },\"false\":false,\"d\":{},}",
+        JSON_ALLOW_TRAILING_COMMAS);
+    EXPECT_TRUE(dict_val->Equals(root2.get()));
+  }
+
+  {
+    // Test keys with periods
+    std::unique_ptr<DictionaryValue> dict_val = DictionaryValue::From(
+        JSONReader::Read("{\"a.b\":3,\"c\":2,\"d.e.f\":{\"g.h.i.j\":1}}"));
+    ASSERT_TRUE(dict_val);
+    int integer_value = 0;
+    EXPECT_TRUE(
+        dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+    EXPECT_EQ(3, integer_value);
+    EXPECT_TRUE(dict_val->GetIntegerWithoutPathExpansion("c", &integer_value));
+    EXPECT_EQ(2, integer_value);
+    DictionaryValue* inner_dict = nullptr;
+    ASSERT_TRUE(
+        dict_val->GetDictionaryWithoutPathExpansion("d.e.f", &inner_dict));
+    EXPECT_EQ(1U, inner_dict->size());
+    EXPECT_TRUE(
+        inner_dict->GetIntegerWithoutPathExpansion("g.h.i.j", &integer_value));
+    EXPECT_EQ(1, integer_value);
+
+    dict_val =
+        DictionaryValue::From(JSONReader::Read("{\"a\":{\"b\":2},\"a.b\":1}"));
+    ASSERT_TRUE(dict_val);
+    EXPECT_TRUE(dict_val->GetInteger("a.b", &integer_value));
+    EXPECT_EQ(2, integer_value);
+    EXPECT_TRUE(
+        dict_val->GetIntegerWithoutPathExpansion("a.b", &integer_value));
+    EXPECT_EQ(1, integer_value);
+  }
+
+  {
+    // Invalid, no closing brace
+    EXPECT_FALSE(JSONReader::Read("{\"a\": true"));
+
+    // Invalid, keys must be quoted
+    EXPECT_FALSE(JSONReader::Read("{foo:true}"));
+
+    // Invalid, trailing comma
+    EXPECT_FALSE(JSONReader::Read("{\"a\":true,}"));
+
+    // Invalid, too many commas
+    EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}"));
+    EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+                                  JSON_ALLOW_TRAILING_COMMAS));
+
+    // Invalid, no separator
+    EXPECT_FALSE(JSONReader::Read("{\"a\" \"b\"}"));
+
+    // Invalid, lone comma.
+    EXPECT_FALSE(JSONReader::Read("{,}"));
+    EXPECT_FALSE(JSONReader::Read("{,}", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(
+        JSONReader::Read("{\"a\":true,,}", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("{,\"a\":true}", JSON_ALLOW_TRAILING_COMMAS));
+    EXPECT_FALSE(JSONReader::Read("{\"a\":true,,\"b\":false}",
+                                  JSON_ALLOW_TRAILING_COMMAS));
+  }
+
+  {
+    // Test stack overflow
+    std::string evil(1000000, '[');
+    evil.append(std::string(1000000, ']'));
+    EXPECT_FALSE(JSONReader::Read(evil));
+  }
+
+  {
+    // A few thousand adjacent lists is fine.
+    std::string not_evil("[");
+    not_evil.reserve(15010);
+    for (int i = 0; i < 5000; ++i)
+      not_evil.append("[],");
+    not_evil.append("[]]");
+    std::unique_ptr<ListValue> list =
+        ListValue::From(JSONReader::Read(not_evil));
+    ASSERT_TRUE(list);
+    EXPECT_EQ(5001U, list->GetSize());
+  }
+
+  {
+    // Test utf8 encoded input
+    std::unique_ptr<Value> root =
+        JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
+
+    std::unique_ptr<DictionaryValue> dict_val =
+        DictionaryValue::From(JSONReader().ReadToValue(
+            "{\"path\": \"/tmp/\xc3\xa0\xc3\xa8\xc3\xb2.png\"}"));
+    ASSERT_TRUE(dict_val);
+    EXPECT_TRUE(dict_val->GetString("path", &str_val));
+    EXPECT_EQ("/tmp/\xC3\xA0\xC3\xA8\xC3\xB2.png", str_val);
+  }
+
+  {
+    // Test invalid utf8 encoded input
+    EXPECT_FALSE(JSONReader().ReadToValue("\"345\xb0\xa1\xb0\xa2\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"123\xc0\x81\""));
+    EXPECT_FALSE(JSONReader().ReadToValue("\"abc\xc0\xae\""));
+  }
+
+  {
+    // Test utf16 encoded strings.
+    std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ(
+        "\xe2\x82\xac"
+        "3,14",
+        str_val);
+
+    root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
+    ASSERT_TRUE(root);
+    EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+    str_val.clear();
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
+  }
+
+  {
+    // Test invalid utf16 strings.
+    const char* const cases[] = {
+        "\"\\u123\"",          // Invalid scalar.
+        "\"\\ud83d\"",         // Invalid scalar.
+        "\"\\u$%@!\"",         // Invalid scalar.
+        "\"\\uzz89\"",         // Invalid scalar.
+        "\"\\ud83d\\udca\"",   // Invalid lower surrogate.
+        "\"\\ud83d\\ud83d\"",  // Invalid lower surrogate.
+        "\"\\ud83foo\"",       // No lower surrogate.
+        "\"\\ud83\\foo\""      // No lower surrogate.
+    };
+    std::unique_ptr<Value> root;
+    for (size_t i = 0; i < arraysize(cases); ++i) {
+      root = JSONReader().ReadToValue(cases[i]);
+      EXPECT_FALSE(root) << cases[i];
+    }
+  }
+
+  {
+    // Test literal root objects.
+    std::unique_ptr<Value> root = JSONReader::Read("null");
+    EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+
+    root = JSONReader::Read("true");
+    ASSERT_TRUE(root);
+    bool bool_value;
+    EXPECT_TRUE(root->GetAsBoolean(&bool_value));
+    EXPECT_TRUE(bool_value);
+
+    root = JSONReader::Read("10");
+    ASSERT_TRUE(root);
+    int integer_value;
+    EXPECT_TRUE(root->GetAsInteger(&integer_value));
+    EXPECT_EQ(10, integer_value);
+
+    root = JSONReader::Read("\"root\"");
+    ASSERT_TRUE(root);
+    std::string str_val;
+    EXPECT_TRUE(root->GetAsString(&str_val));
+    EXPECT_EQ("root", str_val);
+  }
 }
 
 #if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
@@ -553,12 +578,11 @@
   ASSERT_TRUE(base::PathExists(path));
 
   std::string input;
-  ASSERT_TRUE(ReadFileToString(
-      path.Append(FILE_PATH_LITERAL("bom_feff.json")), &input));
+  ASSERT_TRUE(ReadFileToString(path.AppendASCII("bom_feff.json"), &input));
 
   JSONReader reader;
-  scoped_ptr<Value> root(reader.ReadToValue(input));
-  ASSERT_TRUE(root.get()) << reader.GetErrorMessage();
+  std::unique_ptr<Value> root(reader.ReadToValue(input));
+  ASSERT_TRUE(root) << reader.GetErrorMessage();
   EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
 }
 #endif  // !__ANDROID__ && !__ANDROID_HOST__
@@ -566,15 +590,15 @@
 // Tests that the root of a JSON object can be deleted safely while its
 // children outlive it.
 TEST(JSONReaderTest, StringOptimizations) {
-  scoped_ptr<Value> dict_literal_0;
-  scoped_ptr<Value> dict_literal_1;
-  scoped_ptr<Value> dict_string_0;
-  scoped_ptr<Value> dict_string_1;
-  scoped_ptr<Value> list_value_0;
-  scoped_ptr<Value> list_value_1;
+  std::unique_ptr<Value> dict_literal_0;
+  std::unique_ptr<Value> dict_literal_1;
+  std::unique_ptr<Value> dict_string_0;
+  std::unique_ptr<Value> dict_string_1;
+  std::unique_ptr<Value> list_value_0;
+  std::unique_ptr<Value> list_value_1;
 
   {
-    scoped_ptr<Value> root = JSONReader::Read(
+    std::unique_ptr<Value> root = JSONReader::Read(
         "{"
         "  \"test\": {"
         "    \"foo\": true,"
@@ -588,25 +612,25 @@
         "  ]"
         "}",
         JSON_DETACHABLE_CHILDREN);
-    ASSERT_TRUE(root.get());
+    ASSERT_TRUE(root);
 
-    DictionaryValue* root_dict = NULL;
+    DictionaryValue* root_dict = nullptr;
     ASSERT_TRUE(root->GetAsDictionary(&root_dict));
 
-    DictionaryValue* dict = NULL;
-    ListValue* list = NULL;
+    DictionaryValue* dict = nullptr;
+    ListValue* list = nullptr;
 
     ASSERT_TRUE(root_dict->GetDictionary("test", &dict));
     ASSERT_TRUE(root_dict->GetList("list", &list));
 
-    EXPECT_TRUE(dict->Remove("foo", &dict_literal_0));
-    EXPECT_TRUE(dict->Remove("bar", &dict_literal_1));
-    EXPECT_TRUE(dict->Remove("baz", &dict_string_0));
-    EXPECT_TRUE(dict->Remove("moo", &dict_string_1));
+    ASSERT_TRUE(dict->Remove("foo", &dict_literal_0));
+    ASSERT_TRUE(dict->Remove("bar", &dict_literal_1));
+    ASSERT_TRUE(dict->Remove("baz", &dict_string_0));
+    ASSERT_TRUE(dict->Remove("moo", &dict_string_1));
 
     ASSERT_EQ(2u, list->GetSize());
-    EXPECT_TRUE(list->Remove(0, &list_value_0));
-    EXPECT_TRUE(list->Remove(0, &list_value_1));
+    ASSERT_TRUE(list->Remove(0, &list_value_0));
+    ASSERT_TRUE(list->Remove(0, &list_value_1));
   }
 
   bool b = false;
@@ -635,19 +659,14 @@
 // parser implementation against buffer overflow. Best run with DCHECKs so
 // that the one in NextChar fires.
 TEST(JSONReaderTest, InvalidSanity) {
-  const char* const invalid_json[] = {
-      "/* test *",
-      "{\"foo\"",
-      "{\"foo\":",
-      "  [",
-      "\"\\u123g\"",
-      "{\n\"eh:\n}",
+  const char* const kInvalidJson[] = {
+      "/* test *", "{\"foo\"", "{\"foo\":", "  [", "\"\\u123g\"", "{\n\"eh:\n}",
   };
 
-  for (size_t i = 0; i < arraysize(invalid_json); ++i) {
+  for (size_t i = 0; i < arraysize(kInvalidJson); ++i) {
     JSONReader reader;
-    LOG(INFO) << "Sanity test " << i << ": <" << invalid_json[i] << ">";
-    EXPECT_FALSE(reader.ReadToValue(invalid_json[i]));
+    LOG(INFO) << "Sanity test " << i << ": <" << kInvalidJson[i] << ">";
+    EXPECT_FALSE(reader.ReadToValue(kInvalidJson[i]));
     EXPECT_NE(JSONReader::JSON_NO_ERROR, reader.error_code());
     EXPECT_NE("", reader.GetErrorMessage());
   }
diff --git a/base/json/json_string_value_serializer.cc b/base/json/json_string_value_serializer.cc
index af7e010..cd786db 100644
--- a/base/json/json_string_value_serializer.cc
+++ b/base/json/json_string_value_serializer.cc
@@ -48,7 +48,7 @@
 
 JSONStringValueDeserializer::~JSONStringValueDeserializer() {}
 
-scoped_ptr<Value> JSONStringValueDeserializer::Deserialize(
+std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
     int* error_code,
     std::string* error_str) {
   return base::JSONReader::ReadAndReturnError(
diff --git a/base/json/json_string_value_serializer.h b/base/json/json_string_value_serializer.h
index 2459f48..a97da23 100644
--- a/base/json/json_string_value_serializer.h
+++ b/base/json/json_string_value_serializer.h
@@ -59,8 +59,8 @@
   // If |error_message| is non-null, it will be filled in with a formatted
   // error message including the location of the error if appropriate.
   // The caller takes ownership of the returned value.
-  scoped_ptr<base::Value> Deserialize(int* error_code,
-                                      std::string* error_message) override;
+  std::unique_ptr<base::Value> Deserialize(int* error_code,
+                                           std::string* error_message) override;
 
   void set_allow_trailing_comma(bool new_value) {
     allow_trailing_comma_ = new_value;
diff --git a/base/json/json_value_converter.h b/base/json/json_value_converter.h
index a1e0d5b..4cca034 100644
--- a/base/json/json_value_converter.h
+++ b/base/json/json_value_converter.h
@@ -7,13 +7,13 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/base_export.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/scoped_vector.h"
 #include "base/stl_util.h"
 #include "base/strings/string16.h"
@@ -131,7 +131,7 @@
 
  private:
   FieldType StructType::* field_pointer_;
-  scoped_ptr<ValueConverter<FieldType> > value_converter_;
+  std::unique_ptr<ValueConverter<FieldType>> value_converter_;
   DISALLOW_COPY_AND_ASSIGN(FieldConverter);
 };
 
@@ -266,7 +266,7 @@
       if (!list->Get(i, &element))
         continue;
 
-      scoped_ptr<Element> e(new Element);
+      std::unique_ptr<Element> e(new Element);
       if (basic_converter_.Convert(*element, e.get())) {
         field->push_back(e.release());
       } else {
@@ -300,7 +300,7 @@
       if (!list->Get(i, &element))
         continue;
 
-      scoped_ptr<NestedType> nested(new NestedType);
+      std::unique_ptr<NestedType> nested(new NestedType);
       if (converter_.Convert(*element, nested.get())) {
         field->push_back(nested.release());
       } else {
@@ -337,7 +337,7 @@
       if (!list->Get(i, &element))
         continue;
 
-      scoped_ptr<NestedType> nested(new NestedType);
+      std::unique_ptr<NestedType> nested(new NestedType);
       if ((*convert_func_)(element, nested.get())) {
         field->push_back(nested.release());
       } else {
diff --git a/base/json/json_value_converter_unittest.cc b/base/json/json_value_converter_unittest.cc
index 9038610..56ade24 100644
--- a/base/json/json_value_converter_unittest.cc
+++ b/base/json/json_value_converter_unittest.cc
@@ -4,11 +4,11 @@
 
 #include "base/json/json_value_converter.h"
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/scoped_vector.h"
 #include "base/strings/string_piece.h"
 #include "base/values.h"
@@ -106,7 +106,7 @@
       "  \"ints\": [1, 2]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   EXPECT_TRUE(converter.Convert(*value.get(), &message));
@@ -148,7 +148,7 @@
       "  }]\n"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   NestedMessage message;
   base::JSONValueConverter<NestedMessage> converter;
   EXPECT_TRUE(converter.Convert(*value.get(), &message));
@@ -190,7 +190,7 @@
       "  \"ints\": [1, 2]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   EXPECT_FALSE(converter.Convert(*value.get(), &message));
@@ -206,7 +206,7 @@
       "  \"ints\": [1, 2]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   // Convert() still succeeds even if the input doesn't have "bar" field.
@@ -229,7 +229,7 @@
       "  \"ints\": [1, 2]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   EXPECT_FALSE(converter.Convert(*value.get(), &message));
@@ -246,7 +246,7 @@
       "  \"ints\": [1, false]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   EXPECT_FALSE(converter.Convert(*value.get(), &message));
diff --git a/base/json/json_value_serializer_unittest.cc b/base/json/json_value_serializer_unittest.cc
index 7f2ae10..0c079b7 100644
--- a/base/json/json_value_serializer_unittest.cc
+++ b/base/json/json_value_serializer_unittest.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <memory>
 #include <string>
 
 #include "base/files/file_util.h"
@@ -10,7 +11,6 @@
 #include "base/json/json_reader.h"
 #include "base/json/json_string_value_serializer.h"
 #include "base/json/json_writer.h"
-#include "base/memory/scoped_ptr.h"
 #if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
 #include "base/path_service.h"
 #endif
@@ -78,7 +78,7 @@
 }
 
 void ValidateJsonList(const std::string& json) {
-  scoped_ptr<Value> root = JSONReader::Read(json);
+  std::unique_ptr<Value> root = JSONReader::Read(json);
   ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
   ListValue* list = static_cast<ListValue*>(root.get());
   ASSERT_EQ(1U, list->GetSize());
@@ -96,7 +96,7 @@
 
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
@@ -115,7 +115,7 @@
 
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
@@ -132,7 +132,7 @@
 
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_FALSE(value.get());
   ASSERT_NE(0, error_code);
@@ -160,7 +160,7 @@
 
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       file_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
@@ -185,7 +185,7 @@
   // This must fail without the proper flag.
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       file_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_FALSE(value.get());
   ASSERT_NE(0, error_code);
@@ -200,8 +200,8 @@
 }
 
 TEST(JSONValueDeserializerTest, AllowTrailingComma) {
-  scoped_ptr<Value> root;
-  scoped_ptr<Value> root_expected;
+  std::unique_ptr<Value> root;
+  std::unique_ptr<Value> root_expected;
   static const char kTestWithCommas[] = "{\"key\": [true,],}";
   static const char kTestNoCommas[] = "{\"key\": [true]}";
 
@@ -219,7 +219,7 @@
   static const char kOriginalSerialization[] =
     "{\"bool\":true,\"double\":3.14,\"int\":42,\"list\":[1,2],\"null\":null}";
   JSONStringValueDeserializer deserializer(kOriginalSerialization);
-  scoped_ptr<Value> root = deserializer.Deserialize(NULL, NULL);
+  std::unique_ptr<Value> root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(root.get());
   ASSERT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
 
@@ -329,7 +329,7 @@
 
   // escaped ascii text -> json
   JSONStringValueDeserializer deserializer(kExpected);
-  scoped_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
+  std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(deserial_root.get());
   DictionaryValue* dict_root =
       static_cast<DictionaryValue*>(deserial_root.get());
@@ -353,7 +353,7 @@
 
   // escaped ascii text -> json
   JSONStringValueDeserializer deserializer(kExpected);
-  scoped_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
+  std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(deserial_root.get());
   DictionaryValue* dict_root =
       static_cast<DictionaryValue*>(deserial_root.get());
@@ -380,7 +380,7 @@
   ValidateJsonList("[ 1 //// ,2\r\n ]");
 
   // It's ok to have a comment in a string.
-  scoped_ptr<Value> root = JSONReader::Read("[\"// ok\\n /* foo */ \"]");
+  std::unique_ptr<Value> root = JSONReader::Read("[\"// ok\\n /* foo */ \"]");
   ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
   ListValue* list = static_cast<ListValue*>(root.get());
   ASSERT_EQ(1U, list->GetSize());
@@ -416,7 +416,7 @@
   ASSERT_TRUE(PathExists(original_file_path));
 
   JSONFileValueDeserializer deserializer(original_file_path);
-  scoped_ptr<Value> root;
+  std::unique_ptr<Value> root;
   root = deserializer.Deserialize(NULL, NULL);
 
   ASSERT_TRUE(root.get());
@@ -464,7 +464,7 @@
   ASSERT_TRUE(PathExists(original_file_path));
 
   JSONFileValueDeserializer deserializer(original_file_path);
-  scoped_ptr<Value> root;
+  std::unique_ptr<Value> root;
   root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(root.get());
 
@@ -489,7 +489,7 @@
       FILE_PATH_LITERAL("serializer_test_nowhitespace.json"));
   ASSERT_TRUE(PathExists(source_file_path));
   JSONFileValueDeserializer deserializer(source_file_path);
-  scoped_ptr<Value> root;
+  std::unique_ptr<Value> root;
   root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(root.get());
 }
diff --git a/base/json/json_writer.cc b/base/json/json_writer.cc
index 19bc0da..0b658ee 100644
--- a/base/json/json_writer.cc
+++ b/base/json/json_writer.cc
@@ -127,9 +127,7 @@
       bool first_value_has_been_output = false;
       bool result = node.GetAsList(&list);
       DCHECK(result);
-      for (ListValue::const_iterator it = list->begin(); it != list->end();
-           ++it) {
-        const Value* value = *it;
+      for (const auto& value : *list) {
         if (omit_binary_values_ && value->GetType() == Value::TYPE_BINARY)
           continue;
 
diff --git a/base/json/json_writer_unittest.cc b/base/json/json_writer_unittest.cc
index a62b3ba..233ac5e 100644
--- a/base/json/json_writer_unittest.cc
+++ b/base/json/json_writer_unittest.cc
@@ -3,6 +3,8 @@
 // found in the LICENSE file.
 
 #include "base/json/json_writer.h"
+
+#include "base/memory/ptr_util.h"
 #include "base/values.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -55,11 +57,11 @@
   // Writer unittests like empty list/dict nesting,
   // list list nesting, etc.
   DictionaryValue root_dict;
-  scoped_ptr<ListValue> list(new ListValue());
-  scoped_ptr<DictionaryValue> inner_dict(new DictionaryValue());
+  std::unique_ptr<ListValue> list(new ListValue());
+  std::unique_ptr<DictionaryValue> inner_dict(new DictionaryValue());
   inner_dict->SetInteger("inner int", 10);
   list->Append(std::move(inner_dict));
-  list->Append(make_scoped_ptr(new ListValue()));
+  list->Append(WrapUnique(new ListValue()));
   list->AppendBoolean(true);
   root_dict.Set("list", std::move(list));
 
@@ -91,7 +93,7 @@
   DictionaryValue period_dict;
   period_dict.SetIntegerWithoutPathExpansion("a.b", 3);
   period_dict.SetIntegerWithoutPathExpansion("c", 2);
-  scoped_ptr<DictionaryValue> period_dict2(new DictionaryValue());
+  std::unique_ptr<DictionaryValue> period_dict2(new DictionaryValue());
   period_dict2->SetIntegerWithoutPathExpansion("g.h.i.j", 1);
   period_dict.SetWithoutPathExpansion("d.e.f", std::move(period_dict2));
   EXPECT_TRUE(JSONWriter::Write(period_dict, &output_js));
@@ -109,7 +111,7 @@
 
   // Binary values should return errors unless suppressed via the
   // OPTIONS_OMIT_BINARY_VALUES flag.
-  scoped_ptr<Value> root(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+  std::unique_ptr<Value> root(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
   EXPECT_FALSE(JSONWriter::Write(*root, &output_js));
   EXPECT_TRUE(JSONWriter::WriteWithOptions(
       *root, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
@@ -117,9 +119,9 @@
 
   ListValue binary_list;
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
-  binary_list.Append(make_scoped_ptr(new FundamentalValue(5)));
+  binary_list.Append(WrapUnique(new FundamentalValue(5)));
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
-  binary_list.Append(make_scoped_ptr(new FundamentalValue(2)));
+  binary_list.Append(WrapUnique(new FundamentalValue(2)));
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
   EXPECT_FALSE(JSONWriter::Write(binary_list, &output_js));
   EXPECT_TRUE(JSONWriter::WriteWithOptions(
@@ -127,14 +129,11 @@
   EXPECT_EQ("[5,2]", output_js);
 
   DictionaryValue binary_dict;
-  binary_dict.Set(
-      "a", make_scoped_ptr(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+  binary_dict.Set("a", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
   binary_dict.SetInteger("b", 5);
-  binary_dict.Set(
-      "c", make_scoped_ptr(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+  binary_dict.Set("c", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
   binary_dict.SetInteger("d", 2);
-  binary_dict.Set(
-      "e", make_scoped_ptr(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+  binary_dict.Set("e", BinaryValue::CreateWithCopiedBuffer("asdf", 4));
   EXPECT_FALSE(JSONWriter::Write(binary_dict, &output_js));
   EXPECT_TRUE(JSONWriter::WriteWithOptions(
       binary_dict, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
diff --git a/base/lazy_instance.h b/base/lazy_instance.h
index 1a921e6..ac970c5 100644
--- a/base/lazy_instance.h
+++ b/base/lazy_instance.h
@@ -39,6 +39,7 @@
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
+#include "base/debug/leak_annotations.h"
 #include "base/logging.h"
 #include "base/memory/aligned_memory.h"
 #include "base/threading/thread_restrictions.h"
@@ -97,10 +98,10 @@
 #endif
 
   static Type* New(void* instance) {
+    ANNOTATE_SCOPED_MEMORY_LEAK;
     return DefaultLazyInstanceTraits<Type>::New(instance);
   }
-  static void Delete(Type* /* instance */) {
-  }
+  static void Delete(Type*) {}
 };
 
 // Our AtomicWord doubles as a spinlock, where a value of
diff --git a/base/location.h b/base/location.h
index d3bb23c..21e270c 100644
--- a/base/location.h
+++ b/base/location.h
@@ -11,7 +11,7 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/containers/hash_tables.h"
+#include "base/hash.h"
 
 namespace tracked_objects {
 
@@ -59,7 +59,7 @@
       // it comes from __FILE__, so no need to check the contents of the string.
       // See the definition of FROM_HERE in location.h, and how it is used
       // elsewhere.
-      return base::HashPair(reinterpret_cast<uintptr_t>(location.file_name()),
+      return base::HashInts(reinterpret_cast<uintptr_t>(location.file_name()),
                             location.line_number());
     }
   };
diff --git a/base/logging.cc b/base/logging.cc
index 3450b9a..381e9ee 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -13,8 +13,6 @@
 #if defined(OS_WIN)
 #include <io.h>
 #include <windows.h>
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
 typedef HANDLE FileHandle;
 typedef HANDLE MutexHandle;
 // Windows warns on using write().  It prefers _write().
@@ -30,8 +28,6 @@
 #elif defined(OS_POSIX)
 #if defined(OS_NACL)
 #include <sys/time.h>  // timespec doesn't seem to be in <time.h>
-#else
-#include <sys/syscall.h>
 #endif
 #include <time.h>
 #endif
@@ -51,7 +47,6 @@
 #endif
 
 #include <algorithm>
-#include <cassert>
 #include <cstring>
 #include <ctime>
 #include <iomanip>
@@ -63,7 +58,6 @@
 #include "base/debug/alias.h"
 #include "base/debug/debugger.h"
 #include "base/debug/stack_trace.h"
-#include "base/files/file_path.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
@@ -77,6 +71,10 @@
 #include "base/posix/safe_strerror.h"
 #endif
 
+#if !defined(OS_ANDROID)
+#include "base/files/file_path.h"
+#endif
+
 #if defined(OS_ANDROID) || defined(__ANDROID__)
 #include <android/log.h>
 #endif
@@ -209,8 +207,7 @@
     UnlockLogging();
   }
 
-  static void Init(LogLockingState lock_log,
-                   const PathChar* /* new_log_file */) {
+  static void Init(LogLockingState lock_log, const PathChar* /*new_log_file*/) {
     if (initialized)
       return;
     lock_log_file = lock_log;
@@ -294,13 +291,24 @@
                             FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
                             OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, nullptr);
     if (g_log_file == INVALID_HANDLE_VALUE || g_log_file == nullptr) {
+      // We are intentionally not using FilePath or FileUtil here to reduce the
+      // dependencies of the logging implementation. For e.g. FilePath and
+      // FileUtil depend on shell32 and user32.dll. This is not acceptable for
+      // some consumers of base logging like chrome_elf, etc.
+      // Please don't change the code below to use FilePath.
       // try the current directory
-      base::FilePath file_path;
-      if (!base::GetCurrentDirectory(&file_path))
+      wchar_t system_buffer[MAX_PATH];
+      system_buffer[0] = 0;
+      DWORD len = ::GetCurrentDirectory(arraysize(system_buffer),
+                                        system_buffer);
+      if (len == 0 || len > arraysize(system_buffer))
         return false;
 
-      *g_log_file_name = file_path.Append(
-          FILE_PATH_LITERAL("debug.log")).value();
+      *g_log_file_name = system_buffer;
+      // Append a trailing backslash if needed.
+      if (g_log_file_name->back() != L'\\')
+        *g_log_file_name += L"\\";
+      *g_log_file_name += L"debug.log";
 
       g_log_file = CreateFile(g_log_file_name->c_str(), FILE_APPEND_DATA,
                               FILE_SHARE_READ | FILE_SHARE_WRITE, nullptr,
@@ -461,6 +469,10 @@
 template std::string* MakeCheckOpString<std::string, std::string>(
     const std::string&, const std::string&, const char* name);
 
+void MakeCheckOpValueString(std::ostream* os, std::nullptr_t) {
+  (*os) << "nullptr";
+}
+
 #if !defined(NDEBUG)
 // Displays a message box to the user with the error message in it.
 // Used for fatal messages, where we close the app simultaneously.
@@ -769,8 +781,12 @@
     stream_ << base::PlatformThread::CurrentId() << ':';
   if (g_log_timestamp) {
     time_t t = time(nullptr);
+#if defined(__ANDROID__) || defined(ANDROID)
     struct tm local_time;
     memset(&local_time, 0, sizeof(local_time));
+#else
+    struct tm local_time = {0};
+#endif
 #ifdef _MSC_VER
     localtime_s(&local_time, &t);
 #else
@@ -877,7 +893,7 @@
 }
 
 void RawLog(int level, const char* message) {
-  if (level >= g_min_log_level) {
+  if (level >= g_min_log_level && message) {
     size_t bytes_written = 0;
     const size_t message_len = strlen(message);
     int rv;
@@ -930,5 +946,5 @@
 }  // namespace logging
 
 std::ostream& std::operator<<(std::ostream& out, const wchar_t* wstr) {
-  return out << base::WideToUTF8(wstr);
+  return out << (wstr ? base::WideToUTF8(wstr) : std::string());
 }
diff --git a/base/logging.h b/base/logging.h
index c79c84c..2bfc972 100644
--- a/base/logging.h
+++ b/base/logging.h
@@ -11,11 +11,13 @@
 #include <cstring>
 #include <sstream>
 #include <string>
-#include <typeinfo>
+#include <type_traits>
+#include <utility>
 
 #include "base/base_export.h"
 #include "base/debug/debugger.h"
 #include "base/macros.h"
+#include "base/template_util.h"
 #include "build/build_config.h"
 
 //
@@ -402,9 +404,6 @@
 #define LOG_IF(severity, condition) \
   LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
 
-#define SYSLOG(severity) LOG(severity)
-#define SYSLOG_IF(severity, condition) LOG_IF(severity, condition)
-
 // The VLOG macros log with negative verbosities.
 #define VLOG_STREAM(verbose_level) \
   logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
@@ -437,8 +436,6 @@
 
 #define LOG_ASSERT(condition)  \
   LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
-#define SYSLOG_ASSERT(condition) \
-  SYSLOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
 
 #if defined(OS_WIN)
 #define PLOG_STREAM(severity) \
@@ -464,7 +461,7 @@
 // boolean.
 class CheckOpResult {
  public:
-  // |message| must be null if and only if the check failed.
+  // |message| must be non-null if and only if the check failed.
   CheckOpResult(std::string* message) : message_(message) {}
   // Returns true if the check succeeded.
   operator bool() const { return !message_; }
@@ -482,22 +479,28 @@
 // We make sure CHECK et al. always evaluates their arguments, as
 // doing CHECK(FunctionWithSideEffect()) is a common idiom.
 
-#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && !defined(OS_ANDROID)
+#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
 
 // Make all CHECK functions discard their log strings to reduce code
-// bloat for official release builds (except Android).
+// bloat, and improve performance, for official release builds.
 
-// TODO(akalin): This would be more valuable if there were some way to
-// remove BreakDebugger() from the backtrace, perhaps by turning it
-// into a macro (like __debugbreak() on Windows).
+#if defined(COMPILER_GCC) || __clang__
+#define LOGGING_CRASH() __builtin_trap()
+#else
+#define LOGGING_CRASH() ((void)(*(volatile char*)0 = 0))
+#endif
+
+// This is not calling BreakDebugger since this is called frequently, and
+// calling an out-of-line function instead of a noreturn inline macro prevents
+// compiler optimizations.
 #define CHECK(condition)                                                \
-  !(condition) ? ::base::debug::BreakDebugger() : EAT_STREAM_PARAMETERS
+  !(condition) ? LOGGING_CRASH() : EAT_STREAM_PARAMETERS
 
 #define PCHECK(condition) CHECK(condition)
 
 #define CHECK_OP(name, op, val1, val2) CHECK((val1) op (val2))
 
-#else
+#else  // !(OFFICIAL_BUILD && NDEBUG)
 
 #if defined(_PREFAST_) && defined(OS_WIN)
 // Use __analysis_assume to tell the VC++ static analysis engine that
@@ -545,7 +548,31 @@
   else                                                                         \
     logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
 
-#endif
+#endif  // !(OFFICIAL_BUILD && NDEBUG)
+
+// This formats a value for a failing CHECK_XX statement.  Ordinarily,
+// it uses the definition for operator<<, with a few special cases below.
+template <typename T>
+inline typename std::enable_if<
+    base::internal::SupportsOstreamOperator<const T&>::value,
+    void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+  (*os) << v;
+}
+
+// We need overloads for enums that don't support operator<<.
+// (i.e. scoped enums where no operator<< overload was declared).
+template <typename T>
+inline typename std::enable_if<
+    !base::internal::SupportsOstreamOperator<const T&>::value &&
+        std::is_enum<T>::value,
+    void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+  (*os) << static_cast<typename base::underlying_type<T>::type>(v);
+}
+
+// We need an explicit overload for std::nullptr_t.
+BASE_EXPORT void MakeCheckOpValueString(std::ostream* os, std::nullptr_t p);
 
 // Build the error message string.  This is separate from the "Impl"
 // function template because it is not performance critical and so can
@@ -554,7 +581,11 @@
 template<class t1, class t2>
 std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
   std::ostringstream ss;
-  ss << names << " (" << v1 << " vs. " << v2 << ")";
+  ss << names << " (";
+  MakeCheckOpValueString(&ss, v1);
+  ss << " vs. ";
+  MakeCheckOpValueString(&ss, v2);
+  ss << ")";
   std::string* msg = new std::string(ss.str());
   return msg;
 }
@@ -606,7 +637,7 @@
 #define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
 #define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
 
-#if defined(NDEBUG)
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
 #define ENABLE_DLOG 0
 #else
 #define ENABLE_DLOG 1
@@ -748,9 +779,10 @@
 // for example:
 //   DCHECK_EQ(string("abc")[1], 'b');
 //
-// WARNING: These may not compile correctly if one of the arguments is a pointer
-// and the other is NULL. To work around this, simply static_cast NULL to the
-// type of the desired pointer.
+// WARNING: These don't compile correctly if one of the arguments is a pointer
+// and the other is NULL.  In new code, prefer nullptr instead.  To
+// work around this for C++98, simply static_cast NULL to the type of the
+// desired pointer.
 
 #define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2)
 #define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2)
@@ -836,12 +868,6 @@
   DISALLOW_COPY_AND_ASSIGN(LogMessage);
 };
 
-// A non-macro interface to the log facility; (useful
-// when the logging level is not a compile-time constant).
-inline void LogAtLevel(int log_level, const std::string& msg) {
-  LogMessage(__FILE__, __LINE__, log_level).stream() << msg;
-}
-
 // This class is used to explicitly ignore values in the conditional
 // logging macros.  This avoids compiler warnings like "value computed
 // is not used" and "statement has no effect".
diff --git a/base/logging_unittest.cc b/base/logging_unittest.cc
index 22fb855..8a20c54 100644
--- a/base/logging_unittest.cc
+++ b/base/logging_unittest.cc
@@ -191,7 +191,7 @@
 #endif
 
 TEST_F(LoggingTest, DebugLoggingReleaseBehavior) {
-#if !defined(NDEBUG)
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
   int debug_only_variable = 1;
 #endif
   // These should avoid emitting references to |debug_only_variable|
@@ -226,7 +226,7 @@
   // Release build with real DCHECKS.
   SetLogAssertHandler(&LogSink);
   EXPECT_TRUE(DCHECK_IS_ON());
-  EXPECT_FALSE(DLOG_IS_ON(DCHECK));
+  EXPECT_TRUE(DLOG_IS_ON(DCHECK));
 #else
   // Debug build.
   SetLogAssertHandler(&LogSink);
@@ -241,6 +241,23 @@
   EXPECT_EQ(DCHECK_IS_ON() ? 2 : 0, log_sink_call_count);
   DCHECK_EQ(0, 1);
   EXPECT_EQ(DCHECK_IS_ON() ? 3 : 0, log_sink_call_count);
+
+  // Test DCHECK on std::nullptr_t
+  log_sink_call_count = 0;
+  const void* p_null = nullptr;
+  const void* p_not_null = &p_null;
+  DCHECK_EQ(p_null, nullptr);
+  DCHECK_EQ(nullptr, p_null);
+  DCHECK_NE(p_not_null, nullptr);
+  DCHECK_NE(nullptr, p_not_null);
+  EXPECT_EQ(0, log_sink_call_count);
+
+  // Test DCHECK on a scoped enum.
+  enum class Animal { DOG, CAT };
+  DCHECK_EQ(Animal::DOG, Animal::DOG);
+  EXPECT_EQ(0, log_sink_call_count);
+  DCHECK_EQ(Animal::DOG, Animal::CAT);
+  EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
 }
 
 TEST_F(LoggingTest, DcheckReleaseBehavior) {
diff --git a/base/mac/bind_objc_block.h b/base/mac/bind_objc_block.h
index c31f26e..2434d44 100644
--- a/base/mac/bind_objc_block.h
+++ b/base/mac/bind_objc_block.h
@@ -45,8 +45,11 @@
 // note above).
 template<typename R, typename... Args>
 base::Callback<R(Args...)> BindBlock(R(^block)(Args...)) {
-  return base::Bind(&base::internal::RunBlock<R, Args...>,
-                    base::mac::ScopedBlock<R(^)(Args...)>(Block_copy(block)));
+  return base::Bind(
+      &base::internal::RunBlock<R, Args...>,
+      base::mac::ScopedBlock<R (^)(Args...)>(
+          base::mac::internal::ScopedBlockTraits<R (^)(Args...)>::Retain(
+              block)));
 }
 
 }  // namespace base
diff --git a/base/mac/foundation_util.mm b/base/mac/foundation_util.mm
index 524f17c..4f6fa60 100644
--- a/base/mac/foundation_util.mm
+++ b/base/mac/foundation_util.mm
@@ -18,6 +18,10 @@
 #include "build/build_config.h"
 
 #if !defined(OS_IOS)
+#import <AppKit/AppKit.h>
+#endif
+
+#if !defined(OS_IOS)
 extern "C" {
 CFTypeID SecACLGetTypeID();
 CFTypeID SecTrustedApplicationGetTypeID();
@@ -151,7 +155,7 @@
   exec_name.GetComponents(&components);
 
   // It's an error if we don't get any components.
-  if (!components.size())
+  if (components.empty())
     return FilePath();
 
   // Don't prepend '/' to the first component.
@@ -165,7 +169,7 @@
 
   // The first component may be "/" or "//", etc. Only append '/' if it doesn't
   // already end in '/'.
-  if (bundle_name[bundle_name.length() - 1] != '/')
+  if (bundle_name.back() != '/')
     bundle_name += '/';
 
   // Go through the remaining components.
@@ -316,7 +320,7 @@
   DCHECK(!cf_val ||
          CTFontGetTypeID() == CFGetTypeID(cf_val) ||
          (_CFIsObjC(CTFontGetTypeID(), cf_val) &&
-          [ns_val isKindOfClass:NSClassFromString(@"NSFont")]));
+          [ns_val isKindOfClass:[NSFont class]]));
   return ns_val;
 }
 
@@ -324,7 +328,7 @@
   CTFontRef cf_val = reinterpret_cast<CTFontRef>(ns_val);
   DCHECK(!cf_val ||
          CTFontGetTypeID() == CFGetTypeID(cf_val) ||
-         [ns_val isKindOfClass:NSClassFromString(@"NSFont")]);
+         [ns_val isKindOfClass:[NSFont class]]);
   return cf_val;
 }
 #endif
@@ -388,7 +392,7 @@
     return NULL;
 
   id<NSObject> ns_val = reinterpret_cast<id>(const_cast<void*>(cf_val));
-  if ([ns_val isKindOfClass:NSClassFromString(@"NSFont")]) {
+  if ([ns_val isKindOfClass:[NSFont class]]) {
     return (CTFontRef)(cf_val);
   }
   return NULL;
diff --git a/base/mac/libdispatch_task_runner.cc b/base/mac/libdispatch_task_runner.cc
deleted file mode 100644
index 9d18f97..0000000
--- a/base/mac/libdispatch_task_runner.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/libdispatch_task_runner.h"
-
-#include <stdint.h>
-
-#include "base/callback.h"
-
-namespace base {
-namespace mac {
-
-LibDispatchTaskRunner::LibDispatchTaskRunner(const char* name)
-    : queue_(dispatch_queue_create(name, NULL)),
-      queue_finalized_(false, false) {
-  dispatch_set_context(queue_, this);
-  dispatch_set_finalizer_f(queue_, &LibDispatchTaskRunner::Finalizer);
-}
-
-bool LibDispatchTaskRunner::PostDelayedTask(
-    const tracked_objects::Location& /* from_here */,
-    const Closure& task,
-    base::TimeDelta delay) {
-  if (!queue_)
-    return false;
-
-  // The block runtime would implicitly copy the reference, not the object
-  // it's referencing. Copy the closure into block storage so it's available
-  // to run.
-  __block const Closure task_copy = task;
-  void(^run_task)(void) = ^{
-      task_copy.Run();
-  };
-
-  int64_t delay_nano =
-      delay.InMicroseconds() * base::Time::kNanosecondsPerMicrosecond;
-  if (delay_nano > 0) {
-    dispatch_time_t time = dispatch_time(DISPATCH_TIME_NOW, delay_nano);
-    dispatch_after(time, queue_, run_task);
-  } else {
-    dispatch_async(queue_, run_task);
-  }
-  return true;
-}
-
-bool LibDispatchTaskRunner::RunsTasksOnCurrentThread() const {
-  return queue_ == dispatch_get_current_queue();
-}
-
-bool LibDispatchTaskRunner::PostNonNestableDelayedTask(
-    const tracked_objects::Location& from_here,
-    const Closure& task,
-    base::TimeDelta delay) {
-  return PostDelayedTask(from_here, task, delay);
-}
-
-void LibDispatchTaskRunner::Shutdown() {
-  dispatch_release(queue_);
-  queue_ = NULL;
-  queue_finalized_.Wait();
-}
-
-dispatch_queue_t LibDispatchTaskRunner::GetDispatchQueue() const {
-  return queue_;
-}
-
-LibDispatchTaskRunner::~LibDispatchTaskRunner() {
-  if (queue_) {
-    dispatch_set_context(queue_, NULL);
-    dispatch_set_finalizer_f(queue_, NULL);
-    dispatch_release(queue_);
-  }
-}
-
-void LibDispatchTaskRunner::Finalizer(void* context) {
-  LibDispatchTaskRunner* self = static_cast<LibDispatchTaskRunner*>(context);
-  self->queue_finalized_.Signal();
-}
-
-}  // namespace mac
-}  // namespace base
diff --git a/base/mac/libdispatch_task_runner.h b/base/mac/libdispatch_task_runner.h
deleted file mode 100644
index b479bc7..0000000
--- a/base/mac/libdispatch_task_runner.h
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_LIBDISPATCH_TASK_RUNNER_H_
-#define BASE_MAC_LIBDISPATCH_TASK_RUNNER_H_
-
-#include <dispatch/dispatch.h>
-
-#include "base/single_thread_task_runner.h"
-#include "base/synchronization/waitable_event.h"
-
-namespace base {
-namespace mac {
-
-// This is an implementation of the TaskRunner interface that runs closures on
-// a thread managed by Apple's libdispatch. This has the benefit of being able
-// to PostTask() and friends to a dispatch queue, while being reusable as a
-// dispatch_queue_t.
-//
-// One would use this class if an object lives exclusively on one thread but
-// needs a dispatch_queue_t for use in a system API. This ensures all dispatch
-// callbacks happen on the same thread as Closure tasks.
-//
-// A LibDispatchTaskRunner will continue to run until all references to the
-// underlying dispatch queue are released.
-//
-// Important Notes:
-//   - There is no MessageLoop running on this thread, and ::current() returns
-//     NULL.
-//   - No nested loops can be run, and all tasks are run non-nested.
-//   - Work scheduled via libdispatch runs at the same priority as and is
-//     interleaved with posted tasks, though FIFO order is guaranteed.
-//
-class BASE_EXPORT LibDispatchTaskRunner : public base::SingleThreadTaskRunner {
- public:
-  // Starts a new serial dispatch queue with a given name.
-  explicit LibDispatchTaskRunner(const char* name);
-
-  // base::TaskRunner:
-  bool PostDelayedTask(const tracked_objects::Location& from_here,
-                       const Closure& task,
-                       base::TimeDelta delay) override;
-  bool RunsTasksOnCurrentThread() const override;
-
-  // base::SequencedTaskRunner:
-  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  const Closure& task,
-                                  base::TimeDelta delay) override;
-
-  // This blocks the calling thread until all work on the dispatch queue has
-  // been run and the queue has been destroyed. Destroying a queue requires
-  // ALL retained references to it to be released. Any new tasks posted to
-  // this thread after shutdown are dropped.
-  void Shutdown();
-
-  // Returns the dispatch queue associated with this task runner, for use with
-  // system APIs that take dispatch queues. The caller is responsible for
-  // retaining the result.
-  //
-  // All properties (context, finalizer, etc.) are managed by this class, and
-  // clients should only use the result of this for dispatch_async().
-  dispatch_queue_t GetDispatchQueue() const;
-
- protected:
-  ~LibDispatchTaskRunner() override;
-
- private:
-  static void Finalizer(void* context);
-
-  dispatch_queue_t queue_;
-
-  // The event on which Shutdown waits until Finalizer runs.
-  base::WaitableEvent queue_finalized_;
-};
-
-}  // namespace mac
-}  // namespace base
-
-#endif  // BASE_MAC_LIBDISPATCH_TASK_RUNNER_H_
diff --git a/base/mac/mac_logging.h b/base/mac/mac_logging.h
index f558902..30e43ea 100644
--- a/base/mac/mac_logging.h
+++ b/base/mac/mac_logging.h
@@ -29,6 +29,9 @@
 
 namespace logging {
 
+// Returns a UTF8 description from an OS X Status error.
+BASE_EXPORT std::string DescriptionFromOSStatus(OSStatus err);
+
 class BASE_EXPORT OSStatusLogMessage : public logging::LogMessage {
  public:
   OSStatusLogMessage(const char* file_path,
diff --git a/base/mac/mac_logging.mm b/base/mac/mac_logging.mm
new file mode 100644
index 0000000..f0d3c07
--- /dev/null
+++ b/base/mac/mac_logging.mm
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mac_logging.h"
+
+#import <Foundation/Foundation.h>
+
+#include <iomanip>
+
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include <CoreServices/CoreServices.h>
+#endif
+
+namespace logging {
+
+std::string DescriptionFromOSStatus(OSStatus err) {
+  NSError* error =
+      [NSError errorWithDomain:NSOSStatusErrorDomain code:err userInfo:nil];
+  return error.description.UTF8String;
+}
+
+OSStatusLogMessage::OSStatusLogMessage(const char* file_path,
+                                       int line,
+                                       LogSeverity severity,
+                                       OSStatus status)
+    : LogMessage(file_path, line, severity),
+      status_(status) {
+}
+
+OSStatusLogMessage::~OSStatusLogMessage() {
+#if defined(OS_IOS)
+  // TODO(crbug.com/546375): Consider using NSError with NSOSStatusErrorDomain
+  // to try to get a description of the failure.
+  stream() << ": " << status_;
+#else
+  stream() << ": "
+           << DescriptionFromOSStatus(status_)
+           << " ("
+           << status_
+           << ")";
+#endif
+}
+
+}  // namespace logging
diff --git a/base/mac/mac_util.h b/base/mac/mac_util.h
index 7772e88..84948f7 100644
--- a/base/mac/mac_util.h
+++ b/base/mac/mac_util.h
@@ -11,13 +11,6 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/logging.h"
-
-#if defined(__OBJC__)
-#import <Foundation/Foundation.h>
-#else  // __OBJC__
-class NSImage;
-#endif  // __OBJC__
 
 namespace base {
 
@@ -73,12 +66,6 @@
 BASE_EXPORT void SwitchFullScreenModes(FullScreenMode from_mode,
                                        FullScreenMode to_mode);
 
-// Set the visibility of the cursor.
-BASE_EXPORT void SetCursorVisibility(bool visible);
-
-// Activates the process with the given PID.
-BASE_EXPORT void ActivateProcess(pid_t pid);
-
 // Returns true if this process is in the foreground, meaning that it's the
 // frontmost process, the one whose menu bar is shown at the top of the main
 // display.
@@ -126,81 +113,38 @@
 // "OrLater" variants to those that check for a specific version, unless you
 // know for sure that you need to check for a specific version.
 
-// Snow Leopard is Mac OS X 10.6, Darwin 10.
-BASE_EXPORT bool IsOSSnowLeopard();
-
-// Lion is Mac OS X 10.7, Darwin 11.
-BASE_EXPORT bool IsOSLion();
-BASE_EXPORT bool IsOSLionOrEarlier();
-BASE_EXPORT bool IsOSLionOrLater();
-
-// Mountain Lion is Mac OS X 10.8, Darwin 12.
-BASE_EXPORT bool IsOSMountainLion();
-BASE_EXPORT bool IsOSMountainLionOrEarlier();
-BASE_EXPORT bool IsOSMountainLionOrLater();
-
-// Mavericks is Mac OS X 10.9, Darwin 13.
+// Mavericks is OS X 10.9, Darwin 13.
 BASE_EXPORT bool IsOSMavericks();
-BASE_EXPORT bool IsOSMavericksOrEarlier();
-BASE_EXPORT bool IsOSMavericksOrLater();
 
-// Yosemite is Mac OS X 10.10, Darwin 14.
+// Yosemite is OS X 10.10, Darwin 14.
 BASE_EXPORT bool IsOSYosemite();
 BASE_EXPORT bool IsOSYosemiteOrEarlier();
 BASE_EXPORT bool IsOSYosemiteOrLater();
 
-// El Capitan is Mac OS X 10.11, Darwin 15.
+// El Capitan is OS X 10.11, Darwin 15.
 BASE_EXPORT bool IsOSElCapitan();
+BASE_EXPORT bool IsOSElCapitanOrEarlier();
 BASE_EXPORT bool IsOSElCapitanOrLater();
 
+// Sierra is macOS 10.12, Darwin 16.
+BASE_EXPORT bool IsOSSierra();
+BASE_EXPORT bool IsOSSierraOrLater();
+
 // This should be infrequently used. It only makes sense to use this to avoid
 // codepaths that are very likely to break on future (unreleased, untested,
 // unborn) OS releases, or to log when the OS is newer than any known version.
-BASE_EXPORT bool IsOSLaterThanElCapitan_DontCallThis();
+BASE_EXPORT bool IsOSLaterThanSierra_DontCallThis();
 
 // Inline functions that are redundant due to version ranges being mutually-
 // exclusive.
-inline bool IsOSLionOrEarlier() { return !IsOSMountainLionOrLater(); }
-inline bool IsOSMountainLionOrEarlier() { return !IsOSMavericksOrLater(); }
-inline bool IsOSMavericksOrEarlier() { return !IsOSYosemiteOrLater(); }
 inline bool IsOSYosemiteOrEarlier() { return !IsOSElCapitanOrLater(); }
+inline bool IsOSElCapitanOrEarlier() { return !IsOSSierraOrLater(); }
 
 // When the deployment target is set, the code produced cannot run on earlier
 // OS releases. That enables some of the IsOS* family to be implemented as
 // constant-value inline functions. The MAC_OS_X_VERSION_MIN_REQUIRED macro
 // contains the value of the deployment target.
 
-#if defined(MAC_OS_X_VERSION_10_7) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_7
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_7
-inline bool IsOSSnowLeopard() { return false; }
-inline bool IsOSLionOrLater() { return true; }
-#endif
-
-#if defined(MAC_OS_X_VERSION_10_7) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_7
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_7
-inline bool IsOSLion() { return false; }
-#endif
-
-#if defined(MAC_OS_X_VERSION_10_8) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_8
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_8
-inline bool IsOSMountainLionOrLater() { return true; }
-#endif
-
-#if defined(MAC_OS_X_VERSION_10_8) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_8
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_8
-inline bool IsOSMountainLion() { return false; }
-#endif
-
-#if defined(MAC_OS_X_VERSION_10_9) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_9
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_9
-inline bool IsOSMavericksOrLater() { return true; }
-#endif
-
 #if defined(MAC_OS_X_VERSION_10_9) && \
     MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_9
 #define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_9
@@ -229,7 +173,19 @@
     MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_11
 #define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_11
 inline bool IsOSElCapitan() { return false; }
-inline bool IsOSLaterThanElCapitan_DontCallThis() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_12
+inline bool IsOSSierraOrLater() { return true; }
+#endif
+
+#if defined(MAC_OS_X_VERSION_10_12) && \
+    MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_12
+#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_12
+inline bool IsOSSierra() { return false; }
+inline bool IsOSLaterThanSierra_DontCallThis() { return true; }
 #endif
 
 // Retrieve the system's model identifier string from the IOKit registry:
diff --git a/base/mac/mach_port_broker.h b/base/mac/mach_port_broker.h
new file mode 100644
index 0000000..4554b6a
--- /dev/null
+++ b/base/mac/mach_port_broker.h
@@ -0,0 +1,108 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_PORT_BROKER_H_
+#define BASE_MAC_MACH_PORT_BROKER_H_
+
+#include <mach/mach.h>
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/mac/dispatch_source_mach.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/process/port_provider_mac.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// On OS X, the task port of a process is required to collect metrics about the
+// process, and to insert Mach ports into the process. Running |task_for_pid()|
+// is only allowed for privileged code. However, a process has port rights to
+// all its subprocesses, so let the child processes send their Mach port to the
+// parent over IPC.
+//
+// Mach ports can only be sent over Mach IPC, not over the |socketpair()| that
+// the regular IPC system uses. Hence, each child process opens a Mach
+// connection shortly after launching and sends its Mach data to the parent
+// process. A single |MachPortBroker| with a given name is expected to exist in
+// the parent process.
+//
+// Since this data arrives over a separate channel, it is not available
+// immediately after a child process has been started.
+class BASE_EXPORT MachPortBroker : public base::PortProvider {
+ public:
+  // For use in child processes. This will send the task port of the current
+  // process over Mach IPC to the port registered by name (via this class) in
+  // the parent process. Returns true if the message was sent successfully
+  // and false if otherwise.
+  static bool ChildSendTaskPortToParent(const std::string& name);
+
+  // Returns the Mach port name to use when sending or receiving messages.
+  // Does the Right Thing in the browser and in child processes.
+  static std::string GetMachPortName(const std::string& name, bool is_child);
+
+  MachPortBroker(const std::string& name);
+  ~MachPortBroker() override;
+
+  // Performs any initialization work.
+  bool Init();
+
+  // Adds a placeholder to the map for the given pid with MACH_PORT_NULL.
+  // Callers are expected to later update the port with FinalizePid(). Callers
+  // MUST acquire the lock given by GetLock() before calling this method (and
+  // release the lock afterwards).
+  void AddPlaceholderForPid(base::ProcessHandle pid);
+
+  // Removes |pid| from the task port map. Callers MUST acquire the lock given
+  // by GetLock() before calling this method (and release the lock afterwards).
+  void InvalidatePid(base::ProcessHandle pid);
+
+  // The lock that protects this MachPortBroker object. Callers MUST acquire
+  // and release this lock around calls to AddPlaceholderForPid(),
+  // InvalidatePid(), and FinalizePid().
+  base::Lock& GetLock() { return lock_; }
+
+  // Implement |base::PortProvider|.
+  mach_port_t TaskForPid(base::ProcessHandle process) const override;
+
+ private:
+  friend class MachPortBrokerTest;
+
+  // Message handler that is invoked on |dispatch_source_| when an
+  // incoming message needs to be received.
+  void HandleRequest();
+
+  // Updates the mapping for |pid| to include the given |mach_info|.  Does
+  // nothing if PlaceholderForPid() has not already been called for the given
+  // |pid|. Callers MUST acquire the lock given by GetLock() before calling
+  // this method (and release the lock afterwards).
+  void FinalizePid(base::ProcessHandle pid, mach_port_t task_port);
+
+  // Name used to identify a particular port broker.
+  const std::string name_;
+
+  // The Mach port on which the server listens.
+  base::mac::ScopedMachReceiveRight server_port_;
+
+  // The dispatch source and queue on which Mach messages will be received.
+  std::unique_ptr<base::DispatchSourceMach> dispatch_source_;
+
+  // Stores mach info for every process in the broker.
+  typedef std::map<base::ProcessHandle, mach_port_t> MachMap;
+  MachMap mach_map_;
+
+  // Mutex that guards |mach_map_|.
+  mutable base::Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(MachPortBroker);
+};
+
+}  // namespace base
+
+#endif  // BASE_MAC_MACH_PORT_BROKER_H_
diff --git a/base/mac/mach_port_broker.mm b/base/mac/mach_port_broker.mm
new file mode 100644
index 0000000..bd47017
--- /dev/null
+++ b/base/mac/mach_port_broker.mm
@@ -0,0 +1,189 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_broker.h"
+
+#include <bsm/libbsm.h>
+#include <servers/bootstrap.h>
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+namespace {
+
+// Mach message structure used in the child as a sending message.
+struct MachPortBroker_ChildSendMsg {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t child_task_port;
+};
+
+// Complement to the ChildSendMsg, this is used in the parent for receiving
+// a message. Contains a message trailer with audit information.
+struct MachPortBroker_ParentRecvMsg : public MachPortBroker_ChildSendMsg {
+  mach_msg_audit_trailer_t trailer;
+};
+
+}  // namespace
+
+// static
+bool MachPortBroker::ChildSendTaskPortToParent(const std::string& name) {
+  // Look up the named MachPortBroker port that's been registered with the
+  // bootstrap server.
+  mach_port_t parent_port;
+  kern_return_t kr = bootstrap_look_up(bootstrap_port,
+      const_cast<char*>(GetMachPortName(name, true).c_str()), &parent_port);
+  if (kr != KERN_SUCCESS) {
+    BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_look_up";
+    return false;
+  }
+  base::mac::ScopedMachSendRight scoped_right(parent_port);
+
+  // Create the check in message. This will copy a send right on this process'
+  // (the child's) task port and send it to the parent.
+  MachPortBroker_ChildSendMsg msg;
+  bzero(&msg, sizeof(msg));
+  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND) |
+                         MACH_MSGH_BITS_COMPLEX;
+  msg.header.msgh_remote_port = parent_port;
+  msg.header.msgh_size = sizeof(msg);
+  msg.body.msgh_descriptor_count = 1;
+  msg.child_task_port.name = mach_task_self();
+  msg.child_task_port.disposition = MACH_MSG_TYPE_PORT_SEND;
+  msg.child_task_port.type = MACH_MSG_PORT_DESCRIPTOR;
+
+  kr = mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg),
+      0, MACH_PORT_NULL, 100 /*milliseconds*/, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "mach_msg";
+    return false;
+  }
+
+  return true;
+}
+
+// static
+std::string MachPortBroker::GetMachPortName(const std::string& name,
+                                            bool is_child) {
+  // In child processes, use the parent's pid.
+  const pid_t pid = is_child ? getppid() : getpid();
+  return base::StringPrintf(
+      "%s.%s.%d", base::mac::BaseBundleID(), name.c_str(), pid);
+}
+
+mach_port_t MachPortBroker::TaskForPid(base::ProcessHandle pid) const {
+  base::AutoLock lock(lock_);
+  MachPortBroker::MachMap::const_iterator it = mach_map_.find(pid);
+  if (it == mach_map_.end())
+    return MACH_PORT_NULL;
+  return it->second;
+}
+
+MachPortBroker::MachPortBroker(const std::string& name) : name_(name) {}
+
+MachPortBroker::~MachPortBroker() {}
+
+bool MachPortBroker::Init() {
+  DCHECK(server_port_.get() == MACH_PORT_NULL);
+
+  // Check in with launchd and publish the service name.
+  mach_port_t port;
+  kern_return_t kr = bootstrap_check_in(
+      bootstrap_port, GetMachPortName(name_, false).c_str(), &port);
+  if (kr != KERN_SUCCESS) {
+    BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_check_in";
+    return false;
+  }
+  server_port_.reset(port);
+
+  // Start the dispatch source.
+  std::string queue_name =
+      base::StringPrintf("%s.MachPortBroker", base::mac::BaseBundleID());
+  dispatch_source_.reset(new base::DispatchSourceMach(
+      queue_name.c_str(), server_port_.get(), ^{ HandleRequest(); }));
+  dispatch_source_->Resume();
+
+  return true;
+}
+
+void MachPortBroker::AddPlaceholderForPid(base::ProcessHandle pid) {
+  lock_.AssertAcquired();
+  DCHECK_EQ(0u, mach_map_.count(pid));
+  mach_map_[pid] = MACH_PORT_NULL;
+}
+
+void MachPortBroker::InvalidatePid(base::ProcessHandle pid) {
+  lock_.AssertAcquired();
+
+  MachMap::iterator mach_it = mach_map_.find(pid);
+  if (mach_it != mach_map_.end()) {
+    kern_return_t kr = mach_port_deallocate(mach_task_self(), mach_it->second);
+    MACH_LOG_IF(WARNING, kr != KERN_SUCCESS, kr) << "mach_port_deallocate";
+    mach_map_.erase(mach_it);
+  }
+}
+
+void MachPortBroker::HandleRequest() {
+  MachPortBroker_ParentRecvMsg msg;
+  bzero(&msg, sizeof(msg));
+  msg.header.msgh_size = sizeof(msg);
+  msg.header.msgh_local_port = server_port_.get();
+
+  const mach_msg_option_t options = MACH_RCV_MSG |
+      MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_AUDIT) |
+      MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT);
+
+  kern_return_t kr = mach_msg(&msg.header,
+                              options,
+                              0,
+                              sizeof(msg),
+                              server_port_.get(),
+                              MACH_MSG_TIMEOUT_NONE,
+                              MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "mach_msg";
+    return;
+  }
+
+  // Use the kernel audit information to make sure this message is from
+  // a task that this process spawned. The kernel audit token contains the
+  // unspoofable pid of the task that sent the message.
+  //
+  // TODO(rsesek): In the 10.7 SDK, there's audit_token_to_pid().
+  pid_t child_pid;
+  audit_token_to_au32(msg.trailer.msgh_audit,
+      NULL, NULL, NULL, NULL, NULL, &child_pid, NULL, NULL);
+
+  mach_port_t child_task_port = msg.child_task_port.name;
+
+  // Take the lock and update the broker information.
+  {
+    base::AutoLock lock(lock_);
+    FinalizePid(child_pid, child_task_port);
+  }
+  NotifyObservers(child_pid);
+}
+
+void MachPortBroker::FinalizePid(base::ProcessHandle pid,
+                                 mach_port_t task_port) {
+  lock_.AssertAcquired();
+
+  MachMap::iterator it = mach_map_.find(pid);
+  if (it == mach_map_.end()) {
+    // Do nothing for unknown pids.
+    LOG(ERROR) << "Unknown process " << pid << " is sending Mach IPC messages!";
+    return;
+  }
+
+  DCHECK(it->second == MACH_PORT_NULL);
+  if (it->second == MACH_PORT_NULL)
+    it->second = task_port;
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_broker_unittest.cc b/base/mac/mach_port_broker_unittest.cc
new file mode 100644
index 0000000..bff8eb6
--- /dev/null
+++ b/base/mac/mach_port_broker_unittest.cc
@@ -0,0 +1,133 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_broker.h"
+
+#include "base/command_line.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+const char kBootstrapPortName[] = "thisisatest";
+}
+
+class MachPortBrokerTest : public testing::Test,
+                           public base::PortProvider::Observer {
+ public:
+  MachPortBrokerTest()
+      : broker_(kBootstrapPortName),
+        event_(base::WaitableEvent::ResetPolicy::MANUAL,
+               base::WaitableEvent::InitialState::NOT_SIGNALED),
+        received_process_(kNullProcessHandle) {
+    broker_.AddObserver(this);
+  }
+  ~MachPortBrokerTest() override {
+    broker_.RemoveObserver(this);
+  }
+
+  // Helper function to acquire/release locks and call |PlaceholderForPid()|.
+  void AddPlaceholderForPid(base::ProcessHandle pid) {
+    base::AutoLock lock(broker_.GetLock());
+    broker_.AddPlaceholderForPid(pid);
+  }
+
+  // Helper function to acquire/release locks and call |FinalizePid()|.
+  void FinalizePid(base::ProcessHandle pid,
+                   mach_port_t task_port) {
+    base::AutoLock lock(broker_.GetLock());
+    broker_.FinalizePid(pid, task_port);
+  }
+
+  void WaitForTaskPort() {
+    event_.Wait();
+  }
+
+  // base::PortProvider::Observer:
+  void OnReceivedTaskPort(ProcessHandle process) override {
+    received_process_ = process;
+    event_.Signal();
+  }
+
+ protected:
+  MachPortBroker broker_;
+  WaitableEvent event_;
+  ProcessHandle received_process_;
+};
+
+TEST_F(MachPortBrokerTest, Locks) {
+  // Acquire and release the locks.  Nothing bad should happen.
+  base::AutoLock lock(broker_.GetLock());
+}
+
+TEST_F(MachPortBrokerTest, AddPlaceholderAndFinalize) {
+  // Add a placeholder for PID 1.
+  AddPlaceholderForPid(1);
+  EXPECT_EQ(0u, broker_.TaskForPid(1));
+
+  // Finalize PID 1.
+  FinalizePid(1, 100u);
+  EXPECT_EQ(100u, broker_.TaskForPid(1));
+
+  // Should be no entry for PID 2.
+  EXPECT_EQ(0u, broker_.TaskForPid(2));
+}
+
+TEST_F(MachPortBrokerTest, FinalizeUnknownPid) {
+  // Finalizing an entry for an unknown pid should not add it to the map.
+  FinalizePid(1u, 100u);
+  EXPECT_EQ(0u, broker_.TaskForPid(1u));
+}
+
+MULTIPROCESS_TEST_MAIN(MachPortBrokerTestChild) {
+  CHECK(base::MachPortBroker::ChildSendTaskPortToParent(kBootstrapPortName));
+  return 0;
+}
+
+TEST_F(MachPortBrokerTest, ReceivePortFromChild) {
+  ASSERT_TRUE(broker_.Init());
+  CommandLine command_line(
+      base::GetMultiProcessTestChildBaseCommandLine());
+  broker_.GetLock().Acquire();
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
+      "MachPortBrokerTestChild", command_line, LaunchOptions());
+  broker_.AddPlaceholderForPid(test_child_process.Handle());
+  broker_.GetLock().Release();
+
+  WaitForTaskPort();
+  EXPECT_EQ(test_child_process.Handle(), received_process_);
+
+  int rv = -1;
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+
+  EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
+            broker_.TaskForPid(test_child_process.Handle()));
+}
+
+TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
+  ASSERT_TRUE(broker_.Init());
+  CommandLine command_line(
+      base::GetMultiProcessTestChildBaseCommandLine());
+  broker_.GetLock().Acquire();
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
+      "MachPortBrokerTestChild", command_line, LaunchOptions());
+  broker_.GetLock().Release();
+
+  int rv = -1;
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+
+  EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
+            broker_.TaskForPid(test_child_process.Handle()));
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_util.cc b/base/mac/mach_port_util.cc
new file mode 100644
index 0000000..0eee210
--- /dev/null
+++ b/base/mac/mach_port_util.cc
@@ -0,0 +1,136 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_util.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace {
+
+// Struct for sending a complex Mach message.
+struct MachSendComplexMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+};
+
+// Struct for receiving a complex message.
+struct MachReceiveComplexMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+  mach_msg_trailer_t trailer;
+};
+
+}  // namespace
+
+kern_return_t SendMachPort(mach_port_t endpoint,
+                           mach_port_t port_to_send,
+                           int disposition) {
+  MachSendComplexMessage send_msg;
+  send_msg.header.msgh_bits =
+      MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0) | MACH_MSGH_BITS_COMPLEX;
+  send_msg.header.msgh_size = sizeof(send_msg);
+  send_msg.header.msgh_remote_port = endpoint;
+  send_msg.header.msgh_local_port = MACH_PORT_NULL;
+  send_msg.header.msgh_reserved = 0;
+  send_msg.header.msgh_id = 0;
+  send_msg.body.msgh_descriptor_count = 1;
+  send_msg.data.name = port_to_send;
+  send_msg.data.disposition = disposition;
+  send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+
+  kern_return_t kr =
+      mach_msg(&send_msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT,
+               send_msg.header.msgh_size,
+               0,                // receive limit
+               MACH_PORT_NULL,   // receive name
+               0,                // timeout
+               MACH_PORT_NULL);  // notification port
+
+  if (kr != KERN_SUCCESS)
+    mach_port_deallocate(mach_task_self(), endpoint);
+
+  return kr;
+}
+
+base::mac::ScopedMachSendRight ReceiveMachPort(mach_port_t port_to_listen_on) {
+  MachReceiveComplexMessage recv_msg;
+  mach_msg_header_t* recv_hdr = &recv_msg.header;
+  recv_hdr->msgh_local_port = port_to_listen_on;
+  recv_hdr->msgh_size = sizeof(recv_msg);
+
+  kern_return_t kr =
+      mach_msg(recv_hdr, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
+               recv_hdr->msgh_size, port_to_listen_on, 0, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS)
+    return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
+  if (recv_msg.header.msgh_id != 0)
+    return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
+  return base::mac::ScopedMachSendRight(recv_msg.data.name);
+}
+
+mach_port_name_t CreateIntermediateMachPort(
+    mach_port_t task_port,
+    base::mac::ScopedMachSendRight port_to_insert,
+    MachCreateError* error_code) {
+  DCHECK_NE(mach_task_self(), task_port);
+  DCHECK_NE(static_cast<mach_port_name_t>(MACH_PORT_NULL), task_port);
+
+  // Make a port with receive rights in the destination task.
+  mach_port_name_t endpoint;
+  kern_return_t kr =
+      mach_port_allocate(task_port, MACH_PORT_RIGHT_RECEIVE, &endpoint);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_MAKE_RECEIVE_PORT;
+    return MACH_PORT_NULL;
+  }
+
+  // Change its message queue limit so that it accepts one message.
+  mach_port_limits limits = {};
+  limits.mpl_qlimit = 1;
+  kr = mach_port_set_attributes(task_port, endpoint, MACH_PORT_LIMITS_INFO,
+                                reinterpret_cast<mach_port_info_t>(&limits),
+                                MACH_PORT_LIMITS_INFO_COUNT);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_SET_ATTRIBUTES;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+
+  // Get a send right.
+  mach_port_t send_once_right;
+  mach_msg_type_name_t send_right_type;
+  kr =
+      mach_port_extract_right(task_port, endpoint, MACH_MSG_TYPE_MAKE_SEND_ONCE,
+                              &send_once_right, &send_right_type);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_EXTRACT_DEST_RIGHT;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+  DCHECK_EQ(static_cast<mach_msg_type_name_t>(MACH_MSG_TYPE_PORT_SEND_ONCE),
+            send_right_type);
+
+  // This call takes ownership of |send_once_right|.
+  kr = base::SendMachPort(
+      send_once_right, port_to_insert.get(), MACH_MSG_TYPE_COPY_SEND);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_SEND_MACH_PORT;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+
+  // Endpoint is intentionally leaked into the destination task. An IPC must be
+  // sent to the destination task so that it can clean up this port.
+  return endpoint;
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_util.h b/base/mac/mach_port_util.h
new file mode 100644
index 0000000..f7a7f32
--- /dev/null
+++ b/base/mac/mach_port_util.h
@@ -0,0 +1,48 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_PORT_UTIL_H_
+#define BASE_MAC_MACH_PORT_UTIL_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/mac/scoped_mach_port.h"
+
+namespace base {
+
+enum class MachCreateError {
+    ERROR_MAKE_RECEIVE_PORT,
+    ERROR_SET_ATTRIBUTES,
+    ERROR_EXTRACT_DEST_RIGHT,
+    ERROR_SEND_MACH_PORT,
+};
+
+// Sends a Mach port to |dest_port|. Assumes that |dest_port| is a send once
+// right. Takes ownership of |dest_port|.
+BASE_EXPORT kern_return_t SendMachPort(mach_port_t dest_port,
+                                       mach_port_t port_to_send,
+                                       int disposition);
+
+// Receives a Mach port from |port_to_listen_on|, which should have exactly one
+// queued message. Returns |MACH_PORT_NULL| on any error.
+BASE_EXPORT base::mac::ScopedMachSendRight ReceiveMachPort(
+    mach_port_t port_to_listen_on);
+
+// Creates an intermediate Mach port in |task_port| and sends |port_to_insert|
+// as a mach_msg to the intermediate Mach port.
+// |task_port| is the task port of another process.
+// |port_to_insert| must be a send right in the current task's name space.
+// Returns the intermediate port on success, and MACH_PORT_NULL on failure.
+// On failure, |error_code| is set if not null.
+// This method takes ownership of |port_to_insert|. On success, ownership is
+// passed to the intermediate Mach port.
+BASE_EXPORT mach_port_name_t CreateIntermediateMachPort(
+    mach_port_t task_port,
+    base::mac::ScopedMachSendRight port_to_insert,
+    MachCreateError* error_code);
+
+}  // namespace base
+
+#endif  // BASE_MAC_MACH_PORT_UTIL_H_
diff --git a/base/mac/scoped_authorizationref.h b/base/mac/scoped_authorizationref.h
index 39afa8c..03cde86 100644
--- a/base/mac/scoped_authorizationref.h
+++ b/base/mac/scoped_authorizationref.h
@@ -61,10 +61,9 @@
     authorization_ = temp;
   }
 
-  // ScopedAuthorizationRef::release() is like scoped_ptr<>::release.  It is
-  // NOT a wrapper for AuthorizationFree().  To force a
-  // ScopedAuthorizationRef object to call AuthorizationFree(), use
-  // ScopedAuthorizationRef::reset().
+  // ScopedAuthorizationRef::release() is like std::unique_ptr<>::release. It is
+  // NOT a wrapper for AuthorizationFree(). To force a ScopedAuthorizationRef
+  // object to call AuthorizationFree(), use ScopedAuthorizationRef::reset().
   AuthorizationRef release() WARN_UNUSED_RESULT {
     AuthorizationRef temp = authorization_;
     authorization_ = NULL;
diff --git a/base/mac/scoped_block.h b/base/mac/scoped_block.h
index bc2688f..8199677 100644
--- a/base/mac/scoped_block.h
+++ b/base/mac/scoped_block.h
@@ -9,6 +9,12 @@
 
 #include "base/mac/scoped_typeref.h"
 
+#if defined(__has_feature) && __has_feature(objc_arc)
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) (__bridge TYPE)(VALUE)
+#else
+#define BASE_MAC_BRIDGE_CAST(TYPE, VALUE) VALUE
+#endif
+
 namespace base {
 namespace mac {
 
@@ -17,8 +23,13 @@
 template <typename B>
 struct ScopedBlockTraits {
   static B InvalidValue() { return nullptr; }
-  static B Retain(B block) { return Block_copy(block); }
-  static void Release(B block) { Block_release(block); }
+  static B Retain(B block) {
+    return BASE_MAC_BRIDGE_CAST(
+        B, Block_copy(BASE_MAC_BRIDGE_CAST(const void*, block)));
+  }
+  static void Release(B block) {
+    Block_release(BASE_MAC_BRIDGE_CAST(const void*, block));
+  }
 };
 
 }  // namespace internal
@@ -32,4 +43,6 @@
 }  // namespace mac
 }  // namespace base
 
+#undef BASE_MAC_BRIDGE_CAST
+
 #endif  // BASE_MAC_SCOPED_BLOCK_H_
diff --git a/base/mac/scoped_cftyperef.h b/base/mac/scoped_cftyperef.h
index 1be0fbe..ccbc5cf 100644
--- a/base/mac/scoped_cftyperef.h
+++ b/base/mac/scoped_cftyperef.h
@@ -11,10 +11,10 @@
 
 namespace base {
 
-// ScopedCFTypeRef<> is patterned after scoped_ptr<>, but maintains ownership
-// of a CoreFoundation object: any object that can be represented as a
-// CFTypeRef.  Style deviations here are solely for compatibility with
-// scoped_ptr<>'s interface, with which everyone is already familiar.
+// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
+// ownership of a CoreFoundation object: any object that can be represented
+// as a CFTypeRef.  Style deviations here are solely for compatibility with
+// std::unique_ptr<>'s interface, with which everyone is already familiar.
 //
 // By default, ScopedCFTypeRef<> takes ownership of an object (in the
 // constructor or in reset()) by taking over the caller's existing ownership
diff --git a/base/mac/scoped_dispatch_object.h b/base/mac/scoped_dispatch_object.h
new file mode 100644
index 0000000..5f5d517
--- /dev/null
+++ b/base/mac/scoped_dispatch_object.h
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
+#define BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
+
+#include <dispatch/dispatch.h>
+
+#include "base/mac/scoped_typeref.h"
+
+namespace base {
+
+namespace internal {
+
+template <typename T>
+struct ScopedDispatchObjectTraits {
+  static T InvalidValue() { return nullptr; }
+  static T Retain(T object) {
+    dispatch_retain(object);
+    return object;
+  }
+  static void Release(T object) {
+    dispatch_release(object);
+  }
+};
+
+}  // namespace internal
+
+template <typename T>
+using ScopedDispatchObject =
+    ScopedTypeRef<T, internal::ScopedDispatchObjectTraits<T>>;
+
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_DISPATCH_OBJECT_H_
diff --git a/base/mac/scoped_launch_data.h b/base/mac/scoped_launch_data.h
index da62006..f4db330 100644
--- a/base/mac/scoped_launch_data.h
+++ b/base/mac/scoped_launch_data.h
@@ -21,7 +21,7 @@
 
 }  // namespace internal
 
-// Just like scoped_ptr<> but for launch_data_t.
+// Just like std::unique_ptr<> but for launch_data_t.
 using ScopedLaunchData =
     ScopedGeneric<launch_data_t, internal::ScopedLaunchDataTraits>;
 
diff --git a/base/mac/scoped_mach_vm.cc b/base/mac/scoped_mach_vm.cc
new file mode 100644
index 0000000..d52c77f
--- /dev/null
+++ b/base/mac/scoped_mach_vm.cc
@@ -0,0 +1,33 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/scoped_mach_vm.h"
+
+namespace base {
+namespace mac {
+
+void ScopedMachVM::reset(vm_address_t address, vm_size_t size) {
+  DCHECK_EQ(address % PAGE_SIZE, 0u);
+  DCHECK_EQ(size % PAGE_SIZE, 0u);
+
+  if (size_) {
+    if (address_ < address) {
+      vm_deallocate(mach_task_self(),
+                    address_,
+                    std::min(size_, address - address_));
+    }
+    if (address_ + size_ > address + size) {
+      vm_address_t deallocate_start = std::max(address_, address + size);
+      vm_deallocate(mach_task_self(),
+                    deallocate_start,
+                    address_ + size_ - deallocate_start);
+    }
+  }
+
+  address_ = address;
+  size_ = size;
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/base/mac/scoped_mach_vm.h b/base/mac/scoped_mach_vm.h
new file mode 100644
index 0000000..58a13f6
--- /dev/null
+++ b/base/mac/scoped_mach_vm.h
@@ -0,0 +1,93 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_MACH_VM_H_
+#define BASE_MAC_SCOPED_MACH_VM_H_
+
+#include <mach/mach.h>
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+
+// Use ScopedMachVM to supervise ownership of pages in the current process
+// through the Mach VM subsystem. Pages allocated with vm_allocate can be
+// released when exiting a scope with ScopedMachVM.
+//
+// The Mach VM subsystem operates on a page-by-page basis, and a single VM
+// allocation managed by a ScopedMachVM object may span multiple pages. As far
+// as Mach is concerned, allocated pages may be deallocated individually. This
+// is in contrast to higher-level allocators such as malloc, where the base
+// address of an allocation implies the size of an allocated block.
+// Consequently, it is not sufficient to just pass the base address of an
+// allocation to ScopedMachVM, it also needs to know the size of the
+// allocation. To avoid any confusion, both the base address and size must
+// be page-aligned.
+//
+// When dealing with Mach VM, base addresses will naturally be page-aligned,
+// but user-specified sizes may not be. If there's a concern that a size is
+// not page-aligned, use the mach_vm_round_page macro to correct it.
+//
+// Example:
+//
+//   vm_address_t address = 0;
+//   vm_size_t size = 12345;  // This requested size is not page-aligned.
+//   kern_return_t kr =
+//       vm_allocate(mach_task_self(), &address, size, VM_FLAGS_ANYWHERE);
+//   if (kr != KERN_SUCCESS) {
+//     return false;
+//   }
+//   ScopedMachVM vm_owner(address, mach_vm_round_page(size));
+
+namespace base {
+namespace mac {
+
+class BASE_EXPORT ScopedMachVM {
+ public:
+  explicit ScopedMachVM(vm_address_t address = 0, vm_size_t size = 0)
+      : address_(address), size_(size) {
+    DCHECK_EQ(address % PAGE_SIZE, 0u);
+    DCHECK_EQ(size % PAGE_SIZE, 0u);
+  }
+
+  ~ScopedMachVM() {
+    if (size_) {
+      vm_deallocate(mach_task_self(), address_, size_);
+    }
+  }
+
+  void reset(vm_address_t address = 0, vm_size_t size = 0);
+
+  vm_address_t address() const {
+    return address_;
+  }
+
+  vm_size_t size() const {
+    return size_;
+  }
+
+  void swap(ScopedMachVM& that) {
+    std::swap(address_, that.address_);
+    std::swap(size_, that.size_);
+  }
+
+  void release() {
+    address_ = 0;
+    size_ = 0;
+  }
+
+ private:
+  vm_address_t address_;
+  vm_size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedMachVM);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MAC_SCOPED_MACH_VM_H_
diff --git a/base/mac/scoped_nsobject.h b/base/mac/scoped_nsobject.h
index 04c5877..cc54aa0 100644
--- a/base/mac/scoped_nsobject.h
+++ b/base/mac/scoped_nsobject.h
@@ -12,17 +12,20 @@
 // singled out because it is most typically included from other header files.
 #import <Foundation/NSObject.h>
 
+#include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/mac/scoped_typeref.h"
 
+#if !defined(__has_feature) || !__has_feature(objc_arc)
 @class NSAutoreleasePool;
+#endif
 
 namespace base {
 
-// scoped_nsobject<> is patterned after scoped_ptr<>, but maintains ownership
-// of an NSObject subclass object.  Style deviations here are solely for
-// compatibility with scoped_ptr<>'s interface, with which everyone is already
-// familiar.
+// scoped_nsobject<> is patterned after std::unique_ptr<>, but maintains
+// ownership of an NSObject subclass object.  Style deviations here are solely
+// for compatibility with std::unique_ptr<>'s interface, with which everyone is
+// already familiar.
 //
 // scoped_nsobject<> takes ownership of an object (in the constructor or in
 // reset()) by taking over the caller's existing ownership claim.  The caller
@@ -38,14 +41,39 @@
 // scoped_nsautorelease_pool.h instead.
 // We check for bad uses of scoped_nsobject and NSAutoreleasePool at compile
 // time with a template specialization (see below).
+//
+// If Automatic Reference Counting (aka ARC) is enabled then the ownership
+// policy is not controllable by the user as ARC make it really difficult to
+// transfer ownership (the reference passed to scoped_nsobject constructor is
+// sunk by ARC and __attribute((ns_consumed)) appears to not work correctly
+// with Objective-C++ see https://llvm.org/bugs/show_bug.cgi?id=27887). Due to
+// that, the policy is always to |RETAIN| when using ARC.
 
 namespace internal {
 
+BASE_EXPORT id ScopedNSProtocolTraitsRetain(__unsafe_unretained id obj)
+    __attribute((ns_returns_not_retained));
+BASE_EXPORT id ScopedNSProtocolTraitsAutoRelease(__unsafe_unretained id obj)
+    __attribute((ns_returns_not_retained));
+BASE_EXPORT void ScopedNSProtocolTraitsRelease(__unsafe_unretained id obj);
+
+// Traits for ScopedTypeRef<>. As this class may be compiled from file with
+// Automatic Reference Counting enable or not all methods have annotation to
+// enforce the same code generation in both case (in particular, the Retain
+// method uses ns_returns_not_retained to prevent ARC to insert a -release
+// call on the returned value and thus defeating the -retain).
 template <typename NST>
 struct ScopedNSProtocolTraits {
-  static NST InvalidValue() { return nil; }
-  static NST Retain(NST nst) { return [nst retain]; }
-  static void Release(NST nst) { [nst release]; }
+  static NST InvalidValue() __attribute((ns_returns_not_retained)) {
+    return nil;
+  }
+  static NST Retain(__unsafe_unretained NST nst)
+      __attribute((ns_returns_not_retained)) {
+    return ScopedNSProtocolTraitsRetain(nst);
+  }
+  static void Release(__unsafe_unretained NST nst) {
+    ScopedNSProtocolTraitsRelease(nst);
+  }
 };
 
 }  // namespace internal
@@ -54,11 +82,49 @@
 class scoped_nsprotocol
     : public ScopedTypeRef<NST, internal::ScopedNSProtocolTraits<NST>> {
  public:
-  using ScopedTypeRef<NST,
-                      internal::ScopedNSProtocolTraits<NST>>::ScopedTypeRef;
+  using Traits = internal::ScopedNSProtocolTraits<NST>;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  explicit scoped_nsprotocol(
+      NST object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : ScopedTypeRef<NST, Traits>(object, policy) {}
+#else
+  explicit scoped_nsprotocol(NST object = Traits::InvalidValue())
+      : ScopedTypeRef<NST, Traits>(object, base::scoped_policy::RETAIN) {}
+#endif
+
+  scoped_nsprotocol(const scoped_nsprotocol<NST>& that)
+      : ScopedTypeRef<NST, Traits>(that) {}
+
+  template <typename NSR>
+  explicit scoped_nsprotocol(const scoped_nsprotocol<NSR>& that_as_subclass)
+      : ScopedTypeRef<NST, Traits>(that_as_subclass) {}
+
+  scoped_nsprotocol(scoped_nsprotocol<NST>&& that)
+      : ScopedTypeRef<NST, Traits>(that) {}
+
+  scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
+    ScopedTypeRef<NST, Traits>::operator=(that);
+    return *this;
+  }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  void reset(NST object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    ScopedTypeRef<NST, Traits>::reset(object, policy);
+  }
+#else
+  void reset(NST object = Traits::InvalidValue()) {
+    ScopedTypeRef<NST, Traits>::reset(object, base::scoped_policy::RETAIN);
+  }
+#endif
 
   // Shift reference to the autorelease pool to be released later.
-  NST autorelease() { return [this->release() autorelease]; }
+  NST autorelease() __attribute((ns_returns_not_retained)) {
+    return internal::ScopedNSProtocolTraitsAutoRelease(this->release());
+  }
 };
 
 // Free functions
@@ -80,17 +146,92 @@
 template <typename NST>
 class scoped_nsobject : public scoped_nsprotocol<NST*> {
  public:
-  using scoped_nsprotocol<NST*>::scoped_nsprotocol;
+  using Traits = typename scoped_nsprotocol<NST*>::Traits;
 
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  explicit scoped_nsobject(
+      NST* object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : scoped_nsprotocol<NST*>(object, policy) {}
+#else
+  explicit scoped_nsobject(NST* object = Traits::InvalidValue())
+      : scoped_nsprotocol<NST*>(object) {}
+#endif
+
+  scoped_nsobject(const scoped_nsobject<NST>& that)
+      : scoped_nsprotocol<NST*>(that) {}
+
+  template <typename NSR>
+  explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+      : scoped_nsprotocol<NST*>(that_as_subclass) {}
+
+  scoped_nsobject(scoped_nsobject<NST>&& that)
+      : scoped_nsprotocol<NST*>(that) {}
+
+  scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
+    scoped_nsprotocol<NST*>::operator=(that);
+    return *this;
+  }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  void reset(NST* object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    scoped_nsprotocol<NST*>::reset(object, policy);
+  }
+#else
+  void reset(NST* object = Traits::InvalidValue()) {
+    scoped_nsprotocol<NST*>::reset(object);
+  }
+#endif
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
   static_assert(std::is_same<NST, NSAutoreleasePool>::value == false,
                 "Use ScopedNSAutoreleasePool instead");
+#endif
 };
 
 // Specialization to make scoped_nsobject<id> work.
 template<>
 class scoped_nsobject<id> : public scoped_nsprotocol<id> {
  public:
-  using scoped_nsprotocol<id>::scoped_nsprotocol;
+  using Traits = typename scoped_nsprotocol<id>::Traits;
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  explicit scoped_nsobject(
+      id object = Traits::InvalidValue(),
+      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
+      : scoped_nsprotocol<id>(object, policy) {}
+#else
+  explicit scoped_nsobject(id object = Traits::InvalidValue())
+      : scoped_nsprotocol<id>(object) {}
+#endif
+
+  scoped_nsobject(const scoped_nsobject<id>& that)
+      : scoped_nsprotocol<id>(that) {}
+
+  template <typename NSR>
+  explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
+      : scoped_nsprotocol<id>(that_as_subclass) {}
+
+  scoped_nsobject(scoped_nsobject<id>&& that) : scoped_nsprotocol<id>(that) {}
+
+  scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
+    scoped_nsprotocol<id>::operator=(that);
+    return *this;
+  }
+
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+  void reset(id object = Traits::InvalidValue(),
+             base::scoped_policy::OwnershipPolicy policy =
+                 base::scoped_policy::ASSUME) {
+    scoped_nsprotocol<id>::reset(object, policy);
+  }
+#else
+  void reset(id object = Traits::InvalidValue()) {
+    scoped_nsprotocol<id>::reset(object);
+  }
+#endif
 };
 
 }  // namespace base
diff --git a/base/mac/scoped_typeref.h b/base/mac/scoped_typeref.h
index 4211414..b8d8a14 100644
--- a/base/mac/scoped_typeref.h
+++ b/base/mac/scoped_typeref.h
@@ -11,7 +11,7 @@
 
 namespace base {
 
-// ScopedTypeRef<> is patterned after scoped_ptr<>, but maintains a ownership
+// ScopedTypeRef<> is patterned after std::unique_ptr<>, but maintains ownership
 // of a reference to any type that is maintained by Retain and Release methods.
 //
 // The Traits structure must provide the Retain and Release methods for type T.
@@ -53,8 +53,8 @@
  public:
   typedef T element_type;
 
-  ScopedTypeRef(
-      T object = Traits::InvalidValue(),
+  explicit ScopedTypeRef(
+      __unsafe_unretained T object = Traits::InvalidValue(),
       base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
       : object_(object) {
     if (object_ && policy == base::scoped_policy::RETAIN)
@@ -67,6 +67,18 @@
       object_ = Traits::Retain(object_);
   }
 
+  // This allows passing an object to a function that takes its superclass.
+  template <typename R, typename RTraits>
+  explicit ScopedTypeRef(const ScopedTypeRef<R, RTraits>& that_as_subclass)
+      : object_(that_as_subclass.get()) {
+    if (object_)
+      object_ = Traits::Retain(object_);
+  }
+
+  ScopedTypeRef(ScopedTypeRef<T, Traits>&& that) : object_(that.object_) {
+    that.object_ = Traits::InvalidValue();
+  }
+
   ~ScopedTypeRef() {
     if (object_)
       Traits::Release(object_);
@@ -85,9 +97,9 @@
     return &object_;
   }
 
-  void reset(T object = Traits::InvalidValue(),
+  void reset(__unsafe_unretained T object = Traits::InvalidValue(),
              base::scoped_policy::OwnershipPolicy policy =
-                base::scoped_policy::ASSUME) {
+                 base::scoped_policy::ASSUME) {
     if (object && policy == base::scoped_policy::RETAIN)
       object = Traits::Retain(object);
     if (object_)
@@ -95,39 +107,31 @@
     object_ = object;
   }
 
-  bool operator==(T that) const {
-    return object_ == that;
-  }
+  bool operator==(__unsafe_unretained T that) const { return object_ == that; }
 
-  bool operator!=(T that) const {
-    return object_ != that;
-  }
+  bool operator!=(__unsafe_unretained T that) const { return object_ != that; }
 
-  operator T() const {
-    return object_;
-  }
+  operator T() const __attribute((ns_returns_not_retained)) { return object_; }
 
-  T get() const {
-    return object_;
-  }
+  T get() const __attribute((ns_returns_not_retained)) { return object_; }
 
   void swap(ScopedTypeRef& that) {
-    T temp = that.object_;
+    __unsafe_unretained T temp = that.object_;
     that.object_ = object_;
     object_ = temp;
   }
 
-  // ScopedTypeRef<>::release() is like scoped_ptr<>::release.  It is NOT
+  // ScopedTypeRef<>::release() is like std::unique_ptr<>::release.  It is NOT
   // a wrapper for Release().  To force a ScopedTypeRef<> object to call
   // Release(), use ScopedTypeRef<>::reset().
-  T release() WARN_UNUSED_RESULT {
-    T temp = object_;
+  T release() __attribute((ns_returns_not_retained)) WARN_UNUSED_RESULT {
+    __unsafe_unretained T temp = object_;
     object_ = Traits::InvalidValue();
     return temp;
   }
 
  private:
-  T object_;
+  __unsafe_unretained T object_;
 };
 
 }  // namespace base
diff --git a/base/mac/sdk_forward_declarations.h b/base/mac/sdk_forward_declarations.h
new file mode 100644
index 0000000..818a1d0
--- /dev/null
+++ b/base/mac/sdk_forward_declarations.h
@@ -0,0 +1,532 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains forward declarations for items in later SDKs than the
+// default one with which Chromium is built (currently 10.6).
+// If you call any function from this header, be sure to check at runtime for
+// respondsToSelector: before calling these functions (else your code will crash
+// on older OS X versions that chrome still supports).
+
+#ifndef BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
+#define BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
+
+#import <AppKit/AppKit.h>
+#import <CoreWLAN/CoreWLAN.h>
+#import <ImageCaptureCore/ImageCaptureCore.h>
+#import <IOBluetooth/IOBluetooth.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+
+// ----------------------------------------------------------------------------
+// Either define or forward declare classes only available in OSX 10.7+.
+// ----------------------------------------------------------------------------
+
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
+
+@interface CWChannel : NSObject
+@end
+
+@interface CBPeripheral : NSObject
+@end
+
+@interface CBCentralManager : NSObject
+@end
+
+@interface CBUUID : NSObject
+@end
+
+#else
+
+@class CWChannel;
+@class CBPeripheral;
+@class CBCentralManager;
+@class CBUUID;
+
+#endif  // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
+
+@interface NSUUID : NSObject
+@end
+
+#else
+
+@class NSUUID;
+
+#endif  // MAC_OS_X_VERSION_10_8
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
+
+// NSProgress is public API in 10.9, but a version of it exists and is usable
+// in 10.8.
+@interface NSProgress : NSObject
+@end
+
+@interface NSAppearance : NSObject
+@end
+
+#else
+
+@class NSProgress;
+@class NSAppearance;
+
+#endif  // MAC_OS_X_VERSION_10_9
+
+// ----------------------------------------------------------------------------
+// Define typedefs, enums, and protocols not available in the version of the
+// OSX SDK being compiled against.
+// ----------------------------------------------------------------------------
+
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
+
+enum {
+  NSEventPhaseNone = 0,  // event not associated with a phase.
+  NSEventPhaseBegan = 0x1 << 0,
+  NSEventPhaseStationary = 0x1 << 1,
+  NSEventPhaseChanged = 0x1 << 2,
+  NSEventPhaseEnded = 0x1 << 3,
+  NSEventPhaseCancelled = 0x1 << 4
+};
+typedef NSUInteger NSEventPhase;
+
+enum {
+  NSFullScreenWindowMask = 1 << 14,
+};
+
+enum {
+  NSApplicationPresentationFullScreen = 1 << 10,
+};
+
+enum {
+  NSWindowCollectionBehaviorFullScreenPrimary = 1 << 7,
+  NSWindowCollectionBehaviorFullScreenAuxiliary = 1 << 8,
+};
+
+enum {
+  NSEventSwipeTrackingLockDirection = 0x1 << 0,
+  NSEventSwipeTrackingClampGestureAmount = 0x1 << 1,
+};
+typedef NSUInteger NSEventSwipeTrackingOptions;
+
+enum {
+  NSWindowAnimationBehaviorDefault = 0,
+  NSWindowAnimationBehaviorNone = 2,
+  NSWindowAnimationBehaviorDocumentWindow = 3,
+  NSWindowAnimationBehaviorUtilityWindow = 4,
+  NSWindowAnimationBehaviorAlertPanel = 5
+};
+typedef NSInteger NSWindowAnimationBehavior;
+
+enum {
+  NSWindowDocumentVersionsButton = 6,
+  NSWindowFullScreenButton,
+};
+typedef NSUInteger NSWindowButton;
+
+enum CWChannelBand {
+  kCWChannelBandUnknown = 0,
+  kCWChannelBand2GHz = 1,
+  kCWChannelBand5GHz = 2,
+};
+
+enum {
+  kCWSecurityNone = 0,
+  kCWSecurityWEP = 1,
+  kCWSecurityWPAPersonal = 2,
+  kCWSecurityWPAPersonalMixed = 3,
+  kCWSecurityWPA2Personal = 4,
+  kCWSecurityPersonal = 5,
+  kCWSecurityDynamicWEP = 6,
+  kCWSecurityWPAEnterprise = 7,
+  kCWSecurityWPAEnterpriseMixed = 8,
+  kCWSecurityWPA2Enterprise = 9,
+  kCWSecurityEnterprise = 10,
+  kCWSecurityUnknown = NSIntegerMax,
+};
+
+typedef NSInteger CWSecurity;
+
+enum {
+  kBluetoothFeatureLESupportedController = (1 << 6L),
+};
+
+@protocol IOBluetoothDeviceInquiryDelegate
+- (void)deviceInquiryStarted:(IOBluetoothDeviceInquiry*)sender;
+- (void)deviceInquiryDeviceFound:(IOBluetoothDeviceInquiry*)sender
+                          device:(IOBluetoothDevice*)device;
+- (void)deviceInquiryComplete:(IOBluetoothDeviceInquiry*)sender
+                        error:(IOReturn)error
+                      aborted:(BOOL)aborted;
+@end
+
+enum {
+  CBPeripheralStateDisconnected = 0,
+  CBPeripheralStateConnecting,
+  CBPeripheralStateConnected,
+};
+typedef NSInteger CBPeripheralState;
+
+enum {
+  CBCentralManagerStateUnknown = 0,
+  CBCentralManagerStateResetting,
+  CBCentralManagerStateUnsupported,
+  CBCentralManagerStateUnauthorized,
+  CBCentralManagerStatePoweredOff,
+  CBCentralManagerStatePoweredOn,
+};
+typedef NSInteger CBCentralManagerState;
+
+@protocol CBCentralManagerDelegate;
+
+@protocol CBCentralManagerDelegate<NSObject>
+- (void)centralManagerDidUpdateState:(CBCentralManager*)central;
+- (void)centralManager:(CBCentralManager*)central
+    didDiscoverPeripheral:(CBPeripheral*)peripheral
+        advertisementData:(NSDictionary*)advertisementData
+                     RSSI:(NSNumber*)RSSI;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_8
+
+enum { NSEventPhaseMayBegin = 0x1 << 5 };
+
+#endif  // MAC_OS_X_VERSION_10_8
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_9
+
+enum {
+  NSWindowOcclusionStateVisible = 1UL << 1,
+};
+typedef NSUInteger NSWindowOcclusionState;
+
+enum { NSWorkspaceLaunchWithErrorPresentation = 0x00000040 };
+
+#endif  // MAC_OS_X_VERSION_10_9
+
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
+
+enum {
+  NSPressureBehaviorUnknown = -1,
+  NSPressureBehaviorPrimaryDefault = 0,
+  NSPressureBehaviorPrimaryClick = 1,
+  NSPressureBehaviorPrimaryGeneric = 2,
+  NSPressureBehaviorPrimaryAccelerator = 3,
+  NSPressureBehaviorPrimaryDeepClick = 5,
+  NSPressureBehaviorPrimaryDeepDrag = 6
+};
+typedef NSInteger NSPressureBehavior;
+
+@interface NSPressureConfiguration : NSObject
+- (instancetype)initWithPressureBehavior:(NSPressureBehavior)pressureBehavior;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_11
+
+// ----------------------------------------------------------------------------
+// Define NSStrings only available in newer versions of the OSX SDK to force
+// them to be statically linked.
+// ----------------------------------------------------------------------------
+
+extern "C" {
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+BASE_EXPORT extern NSString* const NSWindowWillEnterFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowWillExitFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowDidEnterFullScreenNotification;
+BASE_EXPORT extern NSString* const NSWindowDidExitFullScreenNotification;
+BASE_EXPORT extern NSString* const
+    NSWindowDidChangeBackingPropertiesNotification;
+BASE_EXPORT extern NSString* const CBAdvertisementDataServiceDataKey;
+BASE_EXPORT extern NSString* const CBAdvertisementDataServiceUUIDsKey;
+#endif  // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+BASE_EXPORT extern NSString* const NSWindowDidChangeOcclusionStateNotification;
+BASE_EXPORT extern NSString* const CBAdvertisementDataOverflowServiceUUIDsKey;
+BASE_EXPORT extern NSString* const CBAdvertisementDataIsConnectable;
+#endif  // MAC_OS_X_VERSION_10_9
+
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+BASE_EXPORT extern NSString* const NSUserActivityTypeBrowsingWeb;
+BASE_EXPORT extern NSString* const NSAppearanceNameVibrantDark;
+BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
+#endif  // MAC_OS_X_VERSION_10_10
+}  // extern "C"
+
+// ----------------------------------------------------------------------------
+// If compiling against an older version of the OSX SDK, declare functions that
+// are available in newer versions of the OSX SDK. If compiling against a newer
+// version of the OSX SDK, redeclare those same functions to suppress
+// -Wpartial-availability warnings.
+// ----------------------------------------------------------------------------
+
+// Once Chrome no longer supports OSX 10.6, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+
+@interface NSEvent (LionSDK)
++ (BOOL)isSwipeTrackingFromScrollEventsEnabled;
+- (NSEventPhase)momentumPhase;
+- (NSEventPhase)phase;
+- (BOOL)hasPreciseScrollingDeltas;
+- (CGFloat)scrollingDeltaX;
+- (CGFloat)scrollingDeltaY;
+- (void)trackSwipeEventWithOptions:(NSEventSwipeTrackingOptions)options
+          dampenAmountThresholdMin:(CGFloat)minDampenThreshold
+                               max:(CGFloat)maxDampenThreshold
+                      usingHandler:(void (^)(CGFloat gestureAmount,
+                                             NSEventPhase phase,
+                                             BOOL isComplete,
+                                             BOOL* stop))trackingHandler;
+- (BOOL)isDirectionInvertedFromDevice;
+@end
+
+@interface NSApplication (LionSDK)
+- (void)disableRelaunchOnLogin;
+@end
+
+@interface CALayer (LionSDK)
+- (CGFloat)contentsScale;
+- (void)setContentsScale:(CGFloat)contentsScale;
+@end
+
+@interface NSScreen (LionSDK)
+- (CGFloat)backingScaleFactor;
+- (NSRect)convertRectToBacking:(NSRect)aRect;
+@end
+
+@interface NSWindow (LionSDK)
+- (CGFloat)backingScaleFactor;
+- (NSWindowAnimationBehavior)animationBehavior;
+- (void)setAnimationBehavior:(NSWindowAnimationBehavior)newAnimationBehavior;
+- (void)toggleFullScreen:(id)sender;
+- (void)setRestorable:(BOOL)flag;
+- (NSRect)convertRectFromScreen:(NSRect)aRect;
+- (NSRect)convertRectToScreen:(NSRect)aRect;
+@end
+
+@interface NSCursor (LionSDKDeclarations)
++ (NSCursor*)IBeamCursorForVerticalLayout;
+@end
+
+@interface NSAnimationContext (LionSDK)
++ (void)runAnimationGroup:(void (^)(NSAnimationContext* context))changes
+        completionHandler:(void (^)(void))completionHandler;
+@property(copy) void (^completionHandler)(void);
+@end
+
+@interface NSView (LionSDK)
+- (NSSize)convertSizeFromBacking:(NSSize)size;
+- (void)setWantsBestResolutionOpenGLSurface:(BOOL)flag;
+- (NSDraggingSession*)beginDraggingSessionWithItems:(NSArray*)items
+                                              event:(NSEvent*)event
+                                             source:
+                                                 (id<NSDraggingSource>)source;
+@end
+
+@interface NSObject (ICCameraDeviceDelegateLionSDK)
+- (void)deviceDidBecomeReadyWithCompleteContentCatalog:(ICDevice*)device;
+- (void)didDownloadFile:(ICCameraFile*)file
+                  error:(NSError*)error
+                options:(NSDictionary*)options
+            contextInfo:(void*)contextInfo;
+@end
+
+@interface CWInterface (LionSDK)
+- (BOOL)associateToNetwork:(CWNetwork*)network
+                  password:(NSString*)password
+                     error:(NSError**)error;
+- (NSSet*)scanForNetworksWithName:(NSString*)networkName error:(NSError**)error;
+@end
+
+@interface CWChannel (LionSDK)
+@property(readonly) CWChannelBand channelBand;
+@end
+
+@interface CWNetwork (LionSDK)
+@property(readonly) CWChannel* wlanChannel;
+@property(readonly) NSInteger rssiValue;
+- (BOOL)supportsSecurity:(CWSecurity)security;
+@end
+
+@interface IOBluetoothHostController (LionSDK)
+- (NSString*)nameAsString;
+- (BluetoothHCIPowerState)powerState;
+@end
+
+@interface IOBluetoothL2CAPChannel (LionSDK)
+@property(readonly) BluetoothL2CAPMTU outgoingMTU;
+@end
+
+@interface IOBluetoothDevice (LionSDK)
+- (NSString*)addressString;
+- (unsigned int)classOfDevice;
+- (BluetoothConnectionHandle)connectionHandle;
+- (BluetoothHCIRSSIValue)rawRSSI;
+- (NSArray*)services;
+- (IOReturn)performSDPQuery:(id)target uuids:(NSArray*)uuids;
+@end
+
+@interface CBPeripheral (LionSDK)
+@property(readonly, nonatomic) CFUUIDRef UUID;
+@property(retain, readonly) NSString* name;
+@property(readonly) BOOL isConnected;
+@end
+
+@interface CBCentralManager (LionSDK)
+@property(readonly) CBCentralManagerState state;
+- (id)initWithDelegate:(id<CBCentralManagerDelegate>)delegate
+                 queue:(dispatch_queue_t)queue;
+- (void)scanForPeripheralsWithServices:(NSArray*)serviceUUIDs
+                               options:(NSDictionary*)options;
+- (void)stopScan;
+@end
+
+@interface CBUUID (LionSDK)
+@property(nonatomic, readonly) NSData* data;
++ (CBUUID*)UUIDWithString:(NSString*)theString;
+@end
+
+BASE_EXPORT extern "C" void NSAccessibilityPostNotificationWithUserInfo(
+    id object,
+    NSString* notification,
+    NSDictionary* user_info);
+
+#endif  // MAC_OS_X_VERSION_10_7
+
+// Once Chrome no longer supports OSX 10.7, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_8) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_8
+
+@interface NSColor (MountainLionSDK)
+- (CGColorRef)CGColor;
+@end
+
+@interface NSUUID (MountainLionSDK)
+- (NSString*)UUIDString;
+@end
+
+@interface NSControl (MountainLionSDK)
+@property BOOL allowsExpansionToolTips;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_8
+
+// Once Chrome no longer supports OSX 10.8, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+
+@interface NSProgress (MavericksSDK)
+
+- (instancetype)initWithParent:(NSProgress*)parentProgressOrNil
+                      userInfo:(NSDictionary*)userInfoOrNil;
+@property(copy) NSString* kind;
+
+@property int64_t totalUnitCount;
+@property int64_t completedUnitCount;
+
+@property(getter=isCancellable) BOOL cancellable;
+@property(getter=isPausable) BOOL pausable;
+@property(readonly, getter=isCancelled) BOOL cancelled;
+@property(readonly, getter=isPaused) BOOL paused;
+@property(copy) void (^cancellationHandler)(void);
+@property(copy) void (^pausingHandler)(void);
+- (void)cancel;
+- (void)pause;
+
+- (void)setUserInfoObject:(id)objectOrNil forKey:(NSString*)key;
+- (NSDictionary*)userInfo;
+
+@property(readonly, getter=isIndeterminate) BOOL indeterminate;
+@property(readonly) double fractionCompleted;
+
+- (void)publish;
+- (void)unpublish;
+
+@end
+
+@interface NSScreen (MavericksSDK)
++ (BOOL)screensHaveSeparateSpaces;
+@end
+
+@interface NSView (MavericksSDK)
+- (void)setCanDrawSubviewsIntoLayer:(BOOL)flag;
+- (void)setAppearance:(NSAppearance*)appearance;
+- (NSAppearance*)effectiveAppearance;
+@end
+
+@interface NSWindow (MavericksSDK)
+- (NSWindowOcclusionState)occlusionState;
+@end
+
+@interface NSAppearance (MavericksSDK)
++ (id<NSObject>)appearanceNamed:(NSString*)name;
+@end
+
+@interface CBPeripheral (MavericksSDK)
+@property(readonly, nonatomic) NSUUID* identifier;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_9
+
+// Once Chrome no longer supports OSX 10.9, everything within this preprocessor
+// block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+
+@interface CBUUID (YosemiteSDK)
+- (NSString*)UUIDString;
+@end
+
+@interface NSViewController (YosemiteSDK)
+- (void)viewDidLoad;
+@end
+
+@interface NSWindow (YosemiteSDK)
+- (void)setTitlebarAppearsTransparent:(BOOL)flag;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_10
+
+// Once Chrome no longer supports OSX 10.10.2, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_10_3) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10_3
+
+@interface NSEvent (YosemiteSDK)
+@property(readonly) NSInteger stage;
+@end
+
+@interface NSView (YosemiteSDK)
+- (void)setPressureConfiguration:(NSPressureConfiguration*)aConfiguration;
+@end
+
+#endif  // MAC_OS_X_VERSION_10_10_3
+
+// ----------------------------------------------------------------------------
+// The symbol for kCWSSIDDidChangeNotification is available in the
+// CoreWLAN.framework for OSX versions 10.6 through 10.10. The symbol is not
+// declared in the OSX 10.9+ SDK, so when compiling against an OSX 10.9+ SDK,
+// declare the symbol.
+// ----------------------------------------------------------------------------
+#if defined(MAC_OS_X_VERSION_10_9) && \
+    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_9
+BASE_EXPORT extern "C" NSString* const kCWSSIDDidChangeNotification;
+#endif
+#endif  // BASE_MAC_SDK_FORWARD_DECLARATIONS_H_
diff --git a/base/mac/sdk_forward_declarations.mm b/base/mac/sdk_forward_declarations.mm
new file mode 100644
index 0000000..4e1d7ec
--- /dev/null
+++ b/base/mac/sdk_forward_declarations.mm
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/sdk_forward_declarations.h"
+
+#if !defined(MAC_OS_X_VERSION_10_7) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
+NSString* const NSWindowWillEnterFullScreenNotification =
+    @"NSWindowWillEnterFullScreenNotification";
+
+NSString* const NSWindowWillExitFullScreenNotification =
+    @"NSWindowWillExitFullScreenNotification";
+
+NSString* const NSWindowDidEnterFullScreenNotification =
+    @"NSWindowDidEnterFullScreenNotification";
+
+NSString* const NSWindowDidExitFullScreenNotification =
+    @"NSWindowDidExitFullScreenNotification";
+
+NSString* const NSWindowDidChangeBackingPropertiesNotification =
+    @"NSWindowDidChangeBackingPropertiesNotification";
+
+NSString* const CBAdvertisementDataServiceDataKey = @"kCBAdvDataServiceData";
+
+NSString* const CBAdvertisementDataServiceUUIDsKey = @"kCBAdvDataServiceUUIDs";
+#endif  // MAC_OS_X_VERSION_10_7
+
+#if !defined(MAC_OS_X_VERSION_10_9) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
+NSString* const NSWindowDidChangeOcclusionStateNotification =
+    @"NSWindowDidChangeOcclusionStateNotification";
+
+NSString* const CBAdvertisementDataOverflowServiceUUIDsKey =
+    @"kCBAdvDataOverflowServiceUUIDs";
+
+NSString* const CBAdvertisementDataIsConnectable = @"kCBAdvDataIsConnectable";
+#endif  // MAC_OS_X_VERSION_10_9
+
+#if !defined(MAC_OS_X_VERSION_10_10) || \
+    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
+NSString* const NSUserActivityTypeBrowsingWeb =
+    @"NSUserActivityTypeBrowsingWeb";
+
+NSString* const NSAppearanceNameVibrantDark = @"NSAppearanceNameVibrantDark";
+
+NSString* const NSAppearanceNameVibrantLight = @"NSAppearanceNameVibrantLight";
+#endif  // MAC_OS_X_VERSION_10_10
diff --git a/base/macros.h b/base/macros.h
index c4dfdbc..4c62300 100644
--- a/base/macros.h
+++ b/base/macros.h
@@ -12,21 +12,30 @@
 
 #include <stddef.h>  // For size_t.
 
+#if defined(ANDROID)
+// Prefer Android's libbase definitions to our own.
+#include <android-base/macros.h>
+#endif  // defined(ANDROID)
+
 // Put this in the declarations for a class to be uncopyable.
+#if !defined(DISALLOW_COPY)
 #define DISALLOW_COPY(TypeName) \
   TypeName(const TypeName&) = delete
+#endif
 
 // Put this in the declarations for a class to be unassignable.
+#if !defined(DISALLOW_ASSIGN)
 #define DISALLOW_ASSIGN(TypeName) \
   void operator=(const TypeName&) = delete
+#endif
 
 // A macro to disallow the copy constructor and operator= functions
 // This should be used in the private: declarations for a class
 // We define this macro conditionally as it may be defined by another libraries.
 #if !defined(DISALLOW_COPY_AND_ASSIGN)
 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&);               \
-  void operator=(const TypeName&)
+  TypeName(const TypeName&) = delete;      \
+  void operator=(const TypeName&) = delete
 #endif
 
 // A macro to disallow all the implicit constructors, namely the
@@ -35,9 +44,11 @@
 // This should be used in the private: declarations for a class
 // that wants to prevent anyone from instantiating it. This is
 // especially useful for classes containing only static methods.
+#if !defined(DISALLOW_IMPLICIT_CONSTRUCTORS)
 #define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
   TypeName() = delete;                           \
   DISALLOW_COPY_AND_ASSIGN(TypeName)
+#endif
 
 // The arraysize(arr) macro returns the # of elements in an array arr.  The
 // expression is a compile-time constant, and therefore can be used in defining
@@ -48,14 +59,16 @@
 // This template function declaration is used in defining arraysize.
 // Note that the function doesn't need an implementation, as we only
 // use its type.
+#if !defined(arraysize)
 template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
 #define arraysize(array) (sizeof(ArraySizeHelper(array)))
+#endif
 
 // Used to explicitly mark the return value of a function as unused. If you are
 // really sure you don't want to do anything with the return value of a function
 // that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
 //
-//   scoped_ptr<MyType> my_var = ...;
+//   std::unique_ptr<MyType> my_var = ...;
 //   if (TakeOwnership(my_var.get()) == SUCCESS)
 //     ignore_result(my_var.release());
 //
@@ -82,8 +95,10 @@
 // Use these to declare and define a static local variable (static T;) so that
 // it is leaked so that its destructors are not called at exit. If you need
 // thread-safe initialization, use base/lazy_instance.h instead.
+#if !defined(CR_DEFINE_STATIC_LOCAL)
 #define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
   static type& name = *new type arguments
+#endif
 
 }  // base
 
diff --git a/base/md5_unittest.cc b/base/md5_unittest.cc
index 3926b66..b27efe9 100644
--- a/base/md5_unittest.cc
+++ b/base/md5_unittest.cc
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/md5.h"
+
 #include <string.h>
+
+#include <memory>
 #include <string>
 
-#include "base/memory/scoped_ptr.h"
-#include "base/md5.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -66,7 +68,7 @@
 
 TEST(MD5, MD5SumLongData) {
   const int length = 10 * 1024 * 1024 + 1;
-  scoped_ptr<char[]> data(new char[length]);
+  std::unique_ptr<char[]> data(new char[length]);
 
   for (int i = 0; i < length; ++i)
     data[i] = i & 0xFF;
@@ -108,7 +110,7 @@
   MD5Init(&ctx);
 
   const int length = 10 * 1024 * 1024 + 1;
-  scoped_ptr<char[]> data(new char[length]);
+  std::unique_ptr<char[]> data(new char[length]);
 
   for (int i = 0; i < length; ++i)
     data[i] = i & 0xFF;
diff --git a/base/memory/aligned_memory.h b/base/memory/aligned_memory.h
index bb7bd87..d829011 100644
--- a/base/memory/aligned_memory.h
+++ b/base/memory/aligned_memory.h
@@ -26,9 +26,9 @@
 //   // ... later, to release the memory:
 //   AlignedFree(my_array);
 //
-// Or using scoped_ptr:
+// Or using unique_ptr:
 //
-//   scoped_ptr<float, AlignedFreeDeleter> my_array(
+//   std::unique_ptr<float, AlignedFreeDeleter> my_array(
 //       static_cast<float*>(AlignedAlloc(size, alignment)));
 
 #ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
@@ -104,8 +104,8 @@
 #endif
 }
 
-// Deleter for use with scoped_ptr. E.g., use as
-//   scoped_ptr<Foo, base::AlignedFreeDeleter> foo;
+// Deleter for use with unique_ptr. E.g., use as
+//   std::unique_ptr<Foo, base::AlignedFreeDeleter> foo;
 struct AlignedFreeDeleter {
   inline void operator()(void* ptr) const {
     AlignedFree(ptr);
diff --git a/base/memory/aligned_memory_unittest.cc b/base/memory/aligned_memory_unittest.cc
index b89e341..abe0cf3 100644
--- a/base/memory/aligned_memory_unittest.cc
+++ b/base/memory/aligned_memory_unittest.cc
@@ -3,7 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/memory/aligned_memory.h"
-#include "base/memory/scoped_ptr.h"
+
+#include <memory>
+
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -92,7 +94,7 @@
 }
 
 TEST(AlignedMemoryTest, ScopedDynamicAllocation) {
-  scoped_ptr<float, base::AlignedFreeDeleter> p(
+  std::unique_ptr<float, base::AlignedFreeDeleter> p(
       static_cast<float*>(base::AlignedAlloc(8, 8)));
   EXPECT_TRUE(p.get());
   EXPECT_ALIGNED(p.get(), 8);
diff --git a/base/memory/free_deleter.h b/base/memory/free_deleter.h
new file mode 100644
index 0000000..5604118
--- /dev/null
+++ b/base/memory/free_deleter.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_FREE_DELETER_H_
+#define BASE_MEMORY_FREE_DELETER_H_
+
+#include <stdlib.h>
+
+namespace base {
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+//     static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+  inline void operator()(void* ptr) const {
+    free(ptr);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_FREE_DELETER_H_
diff --git a/base/memory/manual_constructor.h b/base/memory/manual_constructor.h
index 56081a1..f401f62 100644
--- a/base/memory/manual_constructor.h
+++ b/base/memory/manual_constructor.h
@@ -54,8 +54,12 @@
   inline const Type& operator*() const { return *get(); }
 
   template <typename... Ts>
-  inline void Init(const Ts&... params) {
-    new(space_.void_data()) Type(params...);
+  inline void Init(Ts&&... params) {
+    new(space_.void_data()) Type(std::forward<Ts>(params)...);
+  }
+
+  inline void InitFromMove(ManualConstructor<Type>&& o) {
+    Init(std::move(*o));
   }
 
   inline void Destroy() {
diff --git a/base/memory/ptr_util.h b/base/memory/ptr_util.h
new file mode 100644
index 0000000..8747ac9
--- /dev/null
+++ b/base/memory/ptr_util.h
@@ -0,0 +1,74 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PTR_UTIL_H_
+#define BASE_MEMORY_PTR_UTIL_H_
+
+#include <memory>
+#include <utility>
+
+namespace base {
+
+// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
+// Note that std::unique_ptr<T> has very different semantics from
+// std::unique_ptr<T[]>: do not use this helper for array allocations.
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+  return std::unique_ptr<T>(ptr);
+}
+
+namespace internal {
+
+template <typename T>
+struct MakeUniqueResult {
+  using Scalar = std::unique_ptr<T>;
+};
+
+template <typename T>
+struct MakeUniqueResult<T[]> {
+  using Array = std::unique_ptr<T[]>;
+};
+
+template <typename T, size_t N>
+struct MakeUniqueResult<T[N]> {
+  using Invalid = void;
+};
+
+}  // namespace internal
+
+// Helper to construct an object wrapped in a std::unique_ptr. This is an
+// implementation of C++14's std::make_unique that can be used in Chrome.
+//
+// MakeUnique<T>(args) should be preferred over WrapUnique(new T(args)): bare
+// calls to `new` should be treated with scrutiny.
+//
+// Usage:
+//   // ptr is a std::unique_ptr<std::string>
+//   auto ptr = MakeUnique<std::string>("hello world!");
+//
+//   // arr is a std::unique_ptr<int[]>
+//   auto arr = MakeUnique<int[]>(5);
+
+// Overload for non-array types. Arguments are forwarded to T's constructor.
+template <typename T, typename... Args>
+typename internal::MakeUniqueResult<T>::Scalar MakeUnique(Args&&... args) {
+  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+// Overload for array types of unknown bound, e.g. T[]. The array is allocated
+// with `new T[n]()` and value-initialized: note that this is distinct from
+// `new T[n]`, which default-initializes.
+template <typename T>
+typename internal::MakeUniqueResult<T>::Array MakeUnique(size_t size) {
+  return std::unique_ptr<T>(new typename std::remove_extent<T>::type[size]());
+}
+
+// Overload to reject array types of known bound, e.g. T[n].
+template <typename T, typename... Args>
+typename internal::MakeUniqueResult<T>::Invalid MakeUnique(Args&&... args) =
+    delete;
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_PTR_UTIL_H_
diff --git a/base/memory/raw_scoped_refptr_mismatch_checker.h b/base/memory/raw_scoped_refptr_mismatch_checker.h
index 09f982b..5dbc183 100644
--- a/base/memory/raw_scoped_refptr_mismatch_checker.h
+++ b/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -5,10 +5,10 @@
 #ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
 #define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
 
+#include <tuple>
+#include <type_traits>
+
 #include "base/memory/ref_counted.h"
-#include "base/template_util.h"
-#include "base/tuple.h"
-#include "build/build_config.h"
 
 // It is dangerous to post a task with a T* argument where T is a subtype of
 // RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
@@ -25,20 +25,14 @@
 
 template <typename T>
 struct NeedsScopedRefptrButGetsRawPtr {
-#if defined(OS_WIN)
-  enum {
-    value = base::false_type::value
-  };
-#else
   enum {
     // Human readable translation: you needed to be a scoped_refptr if you are a
     // raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase)
     // type.
-    value = (is_pointer<T>::value &&
-             (is_convertible<T, subtle::RefCountedBase*>::value ||
-              is_convertible<T, subtle::RefCountedThreadSafeBase*>::value))
+    value = (std::is_pointer<T>::value &&
+             (std::is_convertible<T, subtle::RefCountedBase*>::value ||
+              std::is_convertible<T, subtle::RefCountedThreadSafeBase*>::value))
   };
-#endif
 };
 
 template <typename Params>
@@ -47,14 +41,14 @@
 };
 
 template <>
-struct ParamsUseScopedRefptrCorrectly<Tuple<>> {
+struct ParamsUseScopedRefptrCorrectly<std::tuple<>> {
   enum { value = 1 };
 };
 
 template <typename Head, typename... Tail>
-struct ParamsUseScopedRefptrCorrectly<Tuple<Head, Tail...>> {
+struct ParamsUseScopedRefptrCorrectly<std::tuple<Head, Tail...>> {
   enum { value = !NeedsScopedRefptrButGetsRawPtr<Head>::value &&
-                 ParamsUseScopedRefptrCorrectly<Tuple<Tail...>>::value };
+                  ParamsUseScopedRefptrCorrectly<std::tuple<Tail...>>::value };
 };
 
 }  // namespace internal
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index a1c1269..b026d9a 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -5,8 +5,11 @@
 #ifndef BASE_MEMORY_REF_COUNTED_H_
 #define BASE_MEMORY_REF_COUNTED_H_
 
+#include <stddef.h>
+
 #include <cassert>
 #include <iosfwd>
+#include <type_traits>
 
 #include "base/atomic_ref_count.h"
 #include "base/base_export.h"
@@ -108,7 +111,7 @@
 
 //
 // A base class for reference counted classes.  Otherwise, known as a cheap
-// knock-off of WebKit's RefCounted<T> class.  To use this guy just extend your
+// knock-off of WebKit's RefCounted<T> class.  To use this, just extend your
 // class from it like so:
 //
 //   class MyFoo : public base::RefCounted<MyFoo> {
@@ -283,7 +286,9 @@
   }
 
   // Copy conversion constructor.
-  template <typename U>
+  template <typename U,
+            typename = typename std::enable_if<
+                std::is_convertible<U*, T*>::value>::type>
   scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
     if (ptr_)
       AddRef(ptr_);
@@ -294,7 +299,9 @@
   scoped_refptr(scoped_refptr&& r) : ptr_(r.get()) { r.ptr_ = nullptr; }
 
   // Move conversion constructor.
-  template <typename U>
+  template <typename U,
+            typename = typename std::enable_if<
+                std::is_convertible<U*, T*>::value>::type>
   scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.get()) {
     r.ptr_ = nullptr;
   }
@@ -357,20 +364,7 @@
     swap(&r.ptr_);
   }
 
- private:
-  template <typename U> friend class scoped_refptr;
-
-  // Allow scoped_refptr<T> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
-  //
-  // Note that this trick is only safe when the == and != operators
-  // are declared explicitly, as otherwise "refptr1 == refptr2"
-  // will compile but do the wrong thing (i.e., convert to Testable
-  // and then do the comparison).
-  typedef T* scoped_refptr::*Testable;
-
- public:
-  operator Testable() const { return ptr_ ? &scoped_refptr::ptr_ : nullptr; }
+  explicit operator bool() const { return ptr_ != nullptr; }
 
   template <typename U>
   bool operator==(const scoped_refptr<U>& rhs) const {
@@ -391,6 +385,10 @@
   T* ptr_;
 
  private:
+  // Friend required for move constructors that set r.ptr_ to null.
+  template <typename U>
+  friend class scoped_refptr;
+
   // Non-inline helpers to allow:
   //     class Opaque;
   //     extern template class scoped_refptr<Opaque>;
@@ -416,8 +414,6 @@
   return scoped_refptr<T>(t);
 }
 
-// Temporary operator overloads to facilitate the transition. See
-// https://crbug.com/110610.
 template <typename T, typename U>
 bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
   return lhs.get() == rhs;
@@ -428,6 +424,16 @@
   return lhs == rhs.get();
 }
 
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t) {
+  return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t, const scoped_refptr<T>& rhs) {
+  return !static_cast<bool>(rhs);
+}
+
 template <typename T, typename U>
 bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
   return !operator==(lhs, rhs);
@@ -439,6 +445,16 @@
 }
 
 template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+  return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+  return !operator==(null, rhs);
+}
+
+template <typename T>
 std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
   return out << p.get();
 }
diff --git a/base/memory/ref_counted_delete_on_message_loop.h b/base/memory/ref_counted_delete_on_message_loop.h
index 84f80d8..de194e8 100644
--- a/base/memory/ref_counted_delete_on_message_loop.h
+++ b/base/memory/ref_counted_delete_on_message_loop.h
@@ -19,10 +19,8 @@
 // Sample usage:
 // class Foo : public RefCountedDeleteOnMessageLoop<Foo> {
 //
-//   Foo(const scoped_refptr<SingleThreadTaskRunner>& loop)
-//       : RefCountedDeleteOnMessageLoop<Foo>(loop) {
-//     ...
-//   }
+//   Foo(scoped_refptr<SingleThreadTaskRunner> loop)
+//       : RefCountedDeleteOnMessageLoop<Foo>(std::move(loop)) {}
 //   ...
 //  private:
 //   friend class RefCountedDeleteOnMessageLoop<Foo>;
@@ -40,8 +38,8 @@
   // MessageLoop on the current thread can be acquired by calling
   // MessageLoop::current()->task_runner().
   RefCountedDeleteOnMessageLoop(
-      const scoped_refptr<SingleThreadTaskRunner>& task_runner)
-      : task_runner_(task_runner) {
+      scoped_refptr<SingleThreadTaskRunner> task_runner)
+      : task_runner_(std::move(task_runner)) {
     DCHECK(task_runner_);
   }
 
diff --git a/base/memory/ref_counted_memory.cc b/base/memory/ref_counted_memory.cc
index 7bbd317..26b78f3 100644
--- a/base/memory/ref_counted_memory.cc
+++ b/base/memory/ref_counted_memory.cc
@@ -38,9 +38,9 @@
 RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
     : data_(p, p + size) {}
 
-RefCountedBytes* RefCountedBytes::TakeVector(
+scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
     std::vector<unsigned char>* to_destroy) {
-  RefCountedBytes* bytes = new RefCountedBytes;
+  scoped_refptr<RefCountedBytes> bytes(new RefCountedBytes);
   bytes->data_.swap(*to_destroy);
   return bytes;
 }
diff --git a/base/memory/ref_counted_memory.h b/base/memory/ref_counted_memory.h
index f37a860..aa22c9e 100644
--- a/base/memory/ref_counted_memory.h
+++ b/base/memory/ref_counted_memory.h
@@ -81,7 +81,8 @@
   // Constructs a RefCountedBytes object by performing a swap. (To non
   // destructively build a RefCountedBytes, use the constructor that takes a
   // vector.)
-  static RefCountedBytes* TakeVector(std::vector<unsigned char>* to_destroy);
+  static scoped_refptr<RefCountedBytes> TakeVector(
+      std::vector<unsigned char>* to_destroy);
 
   // Overridden from RefCountedMemory:
   const unsigned char* front() const override;
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index dbc6f33..7c4e07a 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -105,6 +105,22 @@
 int ScopedRefPtrCountDerived::constructor_count_ = 0;
 int ScopedRefPtrCountDerived::destructor_count_ = 0;
 
+class Other : public base::RefCounted<Other> {
+ private:
+  friend class base::RefCounted<Other>;
+
+  ~Other() {}
+};
+
+scoped_refptr<Other> Overloaded(scoped_refptr<Other> other) {
+  return other;
+}
+
+scoped_refptr<SelfAssign> Overloaded(scoped_refptr<SelfAssign> self_assign) {
+  return self_assign;
+}
+
+
 }  // end namespace
 
 TEST(RefCountedUnitTest, TestSelfAssignment) {
@@ -150,10 +166,31 @@
 }
 
 TEST(RefCountedUnitTest, BooleanTesting) {
-  scoped_refptr<SelfAssign> p;
-  EXPECT_FALSE(p);
-  p = new SelfAssign;
-  EXPECT_TRUE(p);
+  scoped_refptr<SelfAssign> ptr_to_an_instance = new SelfAssign;
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  scoped_refptr<SelfAssign> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
 }
 
 TEST(RefCountedUnitTest, Equality) {
@@ -167,6 +204,16 @@
   EXPECT_NE(p2, p1);
 }
 
+TEST(RefCountedUnitTest, NullptrEquality) {
+  scoped_refptr<SelfAssign> ptr_to_an_instance(new SelfAssign);
+  scoped_refptr<SelfAssign> ptr_to_nullptr;
+
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+  EXPECT_EQ(nullptr, ptr_to_nullptr);
+  EXPECT_EQ(ptr_to_nullptr, nullptr);
+}
+
 TEST(RefCountedUnitTest, ConvertibleEquality) {
   scoped_refptr<Derived> p1(new Derived);
   scoped_refptr<SelfAssign> p2;
@@ -440,3 +487,21 @@
   EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
 }
 
+TEST(RefCountedUnitTest, TestOverloadResolutionCopy) {
+  scoped_refptr<Derived> derived(new Derived);
+  scoped_refptr<SelfAssign> expected(derived);
+  EXPECT_EQ(expected, Overloaded(derived));
+
+  scoped_refptr<Other> other(new Other);
+  EXPECT_EQ(other, Overloaded(other));
+}
+
+TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
+  scoped_refptr<Derived> derived(new Derived);
+  scoped_refptr<SelfAssign> expected(derived);
+  EXPECT_EQ(expected, Overloaded(std::move(derived)));
+
+  scoped_refptr<Other> other(new Other);
+  scoped_refptr<Other> other2(other);
+  EXPECT_EQ(other2, Overloaded(std::move(other)));
+}
diff --git a/base/memory/scoped_ptr.h b/base/memory/scoped_ptr.h
deleted file mode 100644
index 89b90ac..0000000
--- a/base/memory/scoped_ptr.h
+++ /dev/null
@@ -1,609 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Scopers help you manage ownership of a pointer, helping you easily manage a
-// pointer within a scope, and automatically destroying the pointer at the end
-// of a scope.  There are two main classes you will use, which correspond to the
-// operators new/delete and new[]/delete[].
-//
-// Example usage (scoped_ptr<T>):
-//   {
-//     scoped_ptr<Foo> foo(new Foo("wee"));
-//   }  // foo goes out of scope, releasing the pointer with it.
-//
-//   {
-//     scoped_ptr<Foo> foo;          // No pointer managed.
-//     foo.reset(new Foo("wee"));    // Now a pointer is managed.
-//     foo.reset(new Foo("wee2"));   // Foo("wee") was destroyed.
-//     foo.reset(new Foo("wee3"));   // Foo("wee2") was destroyed.
-//     foo->Method();                // Foo::Method() called.
-//     foo.get()->Method();          // Foo::Method() called.
-//     SomeFunc(foo.release());      // SomeFunc takes ownership, foo no longer
-//                                   // manages a pointer.
-//     foo.reset(new Foo("wee4"));   // foo manages a pointer again.
-//     foo.reset();                  // Foo("wee4") destroyed, foo no longer
-//                                   // manages a pointer.
-//   }  // foo wasn't managing a pointer, so nothing was destroyed.
-//
-// Example usage (scoped_ptr<T[]>):
-//   {
-//     scoped_ptr<Foo[]> foo(new Foo[100]);
-//     foo.get()->Method();  // Foo::Method on the 0th element.
-//     foo[10].Method();     // Foo::Method on the 10th element.
-//   }
-//
-// These scopers also implement part of the functionality of C++11 unique_ptr
-// in that they are "movable but not copyable."  You can use the scopers in
-// the parameter and return types of functions to signify ownership transfer
-// in to and out of a function.  When calling a function that has a scoper
-// as the argument type, it must be called with an rvalue of a scoper, which
-// can be created by using std::move(), or the result of another function that
-// generates a temporary; passing by copy will NOT work.  Here is an example
-// using scoped_ptr:
-//
-//   void TakesOwnership(scoped_ptr<Foo> arg) {
-//     // Do something with arg.
-//   }
-//   scoped_ptr<Foo> CreateFoo() {
-//     // No need for calling std::move() for returning a move-only value, or
-//     // when you already have an rvalue as we do here.
-//     return scoped_ptr<Foo>(new Foo("new"));
-//   }
-//   scoped_ptr<Foo> PassThru(scoped_ptr<Foo> arg) {
-//     return arg;
-//   }
-//
-//   {
-//     scoped_ptr<Foo> ptr(new Foo("yay"));  // ptr manages Foo("yay").
-//     TakesOwnership(std::move(ptr));       // ptr no longer owns Foo("yay").
-//     scoped_ptr<Foo> ptr2 = CreateFoo();   // ptr2 owns the return Foo.
-//     scoped_ptr<Foo> ptr3 =                // ptr3 now owns what was in ptr2.
-//         PassThru(std::move(ptr2));        // ptr2 is correspondingly nullptr.
-//   }
-//
-// Notice that if you do not call std::move() when returning from PassThru(), or
-// when invoking TakesOwnership(), the code will not compile because scopers
-// are not copyable; they only implement move semantics which require calling
-// the std::move() function to signify a destructive transfer of state.
-// CreateFoo() is different though because we are constructing a temporary on
-// the return line and thus can avoid needing to call std::move().
-//
-// The conversion move-constructor properly handles upcast in initialization,
-// i.e. you can use a scoped_ptr<Child> to initialize a scoped_ptr<Parent>:
-//
-//   scoped_ptr<Foo> foo(new Foo());
-//   scoped_ptr<FooParent> parent(std::move(foo));
-
-#ifndef BASE_MEMORY_SCOPED_PTR_H_
-#define BASE_MEMORY_SCOPED_PTR_H_
-
-// This is an implementation designed to match the anticipated future TR2
-// implementation of the scoped_ptr class.
-
-#include <assert.h>
-#include <stddef.h>
-#include <stdlib.h>
-
-#include <iosfwd>
-#include <memory>
-#include <type_traits>
-#include <utility>
-
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "base/move.h"
-#include "base/template_util.h"
-
-namespace base {
-
-namespace subtle {
-class RefCountedBase;
-class RefCountedThreadSafeBase;
-}  // namespace subtle
-
-// Function object which invokes 'free' on its parameter, which must be
-// a pointer. Can be used to store malloc-allocated pointers in scoped_ptr:
-//
-// scoped_ptr<int, base::FreeDeleter> foo_ptr(
-//     static_cast<int*>(malloc(sizeof(int))));
-struct FreeDeleter {
-  inline void operator()(void* ptr) const {
-    free(ptr);
-  }
-};
-
-namespace internal {
-
-template <typename T> struct IsNotRefCounted {
-  enum {
-    value = !base::is_convertible<T*, base::subtle::RefCountedBase*>::value &&
-        !base::is_convertible<T*, base::subtle::RefCountedThreadSafeBase*>::
-            value
-  };
-};
-
-// Minimal implementation of the core logic of scoped_ptr, suitable for
-// reuse in both scoped_ptr and its specializations.
-template <class T, class D>
-class scoped_ptr_impl {
- public:
-  explicit scoped_ptr_impl(T* p) : data_(p) {}
-
-  // Initializer for deleters that have data parameters.
-  scoped_ptr_impl(T* p, const D& d) : data_(p, d) {}
-
-  // Templated constructor that destructively takes the value from another
-  // scoped_ptr_impl.
-  template <typename U, typename V>
-  scoped_ptr_impl(scoped_ptr_impl<U, V>* other)
-      : data_(other->release(), other->get_deleter()) {
-    // We do not support move-only deleters.  We could modify our move
-    // emulation to have base::subtle::move() and base::subtle::forward()
-    // functions that are imperfect emulations of their C++11 equivalents,
-    // but until there's a requirement, just assume deleters are copyable.
-  }
-
-  template <typename U, typename V>
-  void TakeState(scoped_ptr_impl<U, V>* other) {
-    // See comment in templated constructor above regarding lack of support
-    // for move-only deleters.
-    reset(other->release());
-    get_deleter() = other->get_deleter();
-  }
-
-  ~scoped_ptr_impl() {
-    // Match libc++, which calls reset() in its destructor.
-    // Use nullptr as the new value for three reasons:
-    // 1. libc++ does it.
-    // 2. Avoids infinitely recursing into destructors if two classes are owned
-    //    in a reference cycle (see ScopedPtrTest.ReferenceCycle).
-    // 3. If |this| is accessed in the future, in a use-after-free bug, attempts
-    //    to dereference |this|'s pointer should cause either a failure or a
-    //    segfault closer to the problem. If |this| wasn't reset to nullptr,
-    //    the access would cause the deleted memory to be read or written
-    //    leading to other more subtle issues.
-    reset(nullptr);
-  }
-
-  void reset(T* p) {
-    // Match C++11's definition of unique_ptr::reset(), which requires changing
-    // the pointer before invoking the deleter on the old pointer. This prevents
-    // |this| from being accessed after the deleter is run, which may destroy
-    // |this|.
-    T* old = data_.ptr;
-    data_.ptr = p;
-    if (old != nullptr)
-      static_cast<D&>(data_)(old);
-  }
-
-  T* get() const { return data_.ptr; }
-
-  D& get_deleter() { return data_; }
-  const D& get_deleter() const { return data_; }
-
-  void swap(scoped_ptr_impl& p2) {
-    // Standard swap idiom: 'using std::swap' ensures that std::swap is
-    // present in the overload set, but we call swap unqualified so that
-    // any more-specific overloads can be used, if available.
-    using std::swap;
-    swap(static_cast<D&>(data_), static_cast<D&>(p2.data_));
-    swap(data_.ptr, p2.data_.ptr);
-  }
-
-  T* release() {
-    T* old_ptr = data_.ptr;
-    data_.ptr = nullptr;
-    return old_ptr;
-  }
-
- private:
-  // Needed to allow type-converting constructor.
-  template <typename U, typename V> friend class scoped_ptr_impl;
-
-  // Use the empty base class optimization to allow us to have a D
-  // member, while avoiding any space overhead for it when D is an
-  // empty class.  See e.g. http://www.cantrip.org/emptyopt.html for a good
-  // discussion of this technique.
-  struct Data : public D {
-    explicit Data(T* ptr_in) : ptr(ptr_in) {}
-    Data(T* ptr_in, const D& other) : D(other), ptr(ptr_in) {}
-    T* ptr;
-  };
-
-  Data data_;
-
-  DISALLOW_COPY_AND_ASSIGN(scoped_ptr_impl);
-};
-
-}  // namespace internal
-
-}  // namespace base
-
-// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>
-// automatically deletes the pointer it holds (if any).
-// That is, scoped_ptr<T> owns the T object that it points to.
-// Like a T*, a scoped_ptr<T> may hold either nullptr or a pointer to a T
-// object. Also like T*, scoped_ptr<T> is thread-compatible, and once you
-// dereference it, you get the thread safety guarantees of T.
-//
-// The size of scoped_ptr is small. On most compilers, when using the
-// std::default_delete, sizeof(scoped_ptr<T>) == sizeof(T*). Custom deleters
-// will increase the size proportional to whatever state they need to have. See
-// comments inside scoped_ptr_impl<> for details.
-//
-// Current implementation targets having a strict subset of  C++11's
-// unique_ptr<> features. Known deficiencies include not supporting move-only
-// deleteres, function pointers as deleters, and deleters with reference
-// types.
-template <class T, class D = std::default_delete<T>>
-class scoped_ptr {
-  DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(scoped_ptr)
-
-  static_assert(!std::is_array<T>::value,
-                "scoped_ptr doesn't support array with size");
-  static_assert(base::internal::IsNotRefCounted<T>::value,
-                "T is a refcounted type and needs a scoped_refptr");
-
- public:
-  // The element and deleter types.
-  using element_type = T;
-  using deleter_type = D;
-
-  // Constructor.  Defaults to initializing with nullptr.
-  scoped_ptr() : impl_(nullptr) {}
-
-  // Constructor.  Takes ownership of p.
-  explicit scoped_ptr(element_type* p) : impl_(p) {}
-
-  // Constructor.  Allows initialization of a stateful deleter.
-  scoped_ptr(element_type* p, const D& d) : impl_(p, d) {}
-
-  // Constructor.  Allows construction from a nullptr.
-  scoped_ptr(std::nullptr_t) : impl_(nullptr) {}
-
-  // Move constructor.
-  //
-  // IMPLEMENTATION NOTE: Clang requires a move constructor to be defined (and
-  // not just the conversion constructor) in order to warn on pessimizing moves.
-  // The requirements for the move constructor are specified in C++11
-  // 20.7.1.2.1.15-17, which has some subtleties around reference deleters. As
-  // we don't support reference (or move-only) deleters, the post conditions are
-  // trivially true: we always copy construct the deleter from other's deleter.
-  scoped_ptr(scoped_ptr&& other) : impl_(&other.impl_) {}
-
-  // Conversion constructor.  Allows construction from a scoped_ptr rvalue for a
-  // convertible type and deleter.
-  //
-  // IMPLEMENTATION NOTE: C++ 20.7.1.2.1.19 requires this constructor to only
-  // participate in overload resolution if all the following are true:
-  // - U is implicitly convertible to T: this is important for 2 reasons:
-  //     1. So type traits don't incorrectly return true, e.g.
-  //          std::is_convertible<scoped_ptr<Base>, scoped_ptr<Derived>>::value
-  //        should be false.
-  //     2. To make sure code like this compiles:
-  //        void F(scoped_ptr<int>);
-  //        void F(scoped_ptr<Base>);
-  //        // Ambiguous since both conversion constructors match.
-  //        F(scoped_ptr<Derived>());
-  // - U is not an array type: to prevent conversions from scoped_ptr<T[]> to
-  //   scoped_ptr<T>.
-  // - D is a reference type and E is the same type, or D is not a reference
-  //   type and E is implicitly convertible to D: again, we don't support
-  //   reference deleters, so we only worry about the latter requirement.
-  template <typename U,
-            typename E,
-            typename std::enable_if<!std::is_array<U>::value &&
-                                    std::is_convertible<U*, T*>::value &&
-                                    std::is_convertible<E, D>::value>::type* =
-                nullptr>
-  scoped_ptr(scoped_ptr<U, E>&& other)
-      : impl_(&other.impl_) {}
-
-  // operator=.
-  //
-  // IMPLEMENTATION NOTE: Unlike the move constructor, Clang does not appear to
-  // require a move assignment operator to trigger the pessimizing move warning:
-  // in this case, the warning triggers when moving a temporary. For consistency
-  // with the move constructor, we define it anyway. C++11 20.7.1.2.3.1-3
-  // defines several requirements around this: like the move constructor, the
-  // requirements are simplified by the fact that we don't support move-only or
-  // reference deleters.
-  scoped_ptr& operator=(scoped_ptr&& rhs) {
-    impl_.TakeState(&rhs.impl_);
-    return *this;
-  }
-
-  // operator=.  Allows assignment from a scoped_ptr rvalue for a convertible
-  // type and deleter.
-  //
-  // IMPLEMENTATION NOTE: C++11 unique_ptr<> keeps this operator= distinct from
-  // the normal move assignment operator. C++11 20.7.1.2.3.4-7 contains the
-  // requirement for this operator, but like the conversion constructor, the
-  // requirements are greatly simplified by not supporting move-only or
-  // reference deleters.
-  template <typename U,
-            typename E,
-            typename std::enable_if<!std::is_array<U>::value &&
-                                    std::is_convertible<U*, T*>::value &&
-                                    // Note that this really should be
-                                    // std::is_assignable, but <type_traits>
-                                    // appears to be missing this on some
-                                    // platforms. This is close enough (though
-                                    // it's not the same).
-                                    std::is_convertible<D, E>::value>::type* =
-                nullptr>
-  scoped_ptr& operator=(scoped_ptr<U, E>&& rhs) {
-    impl_.TakeState(&rhs.impl_);
-    return *this;
-  }
-
-  // operator=.  Allows assignment from a nullptr. Deletes the currently owned
-  // object, if any.
-  scoped_ptr& operator=(std::nullptr_t) {
-    reset();
-    return *this;
-  }
-
-  // Reset.  Deletes the currently owned object, if any.
-  // Then takes ownership of a new object, if given.
-  void reset(element_type* p = nullptr) { impl_.reset(p); }
-
-  // Accessors to get the owned object.
-  // operator* and operator-> will assert() if there is no current object.
-  element_type& operator*() const {
-    assert(impl_.get() != nullptr);
-    return *impl_.get();
-  }
-  element_type* operator->() const  {
-    assert(impl_.get() != nullptr);
-    return impl_.get();
-  }
-  element_type* get() const { return impl_.get(); }
-
-  // Access to the deleter.
-  deleter_type& get_deleter() { return impl_.get_deleter(); }
-  const deleter_type& get_deleter() const { return impl_.get_deleter(); }
-
-  // Allow scoped_ptr<element_type> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
-  //
-  // Note that this trick is only safe when the == and != operators
-  // are declared explicitly, as otherwise "scoped_ptr1 ==
-  // scoped_ptr2" will compile but do the wrong thing (i.e., convert
-  // to Testable and then do the comparison).
- private:
-  typedef base::internal::scoped_ptr_impl<element_type, deleter_type>
-      scoped_ptr::*Testable;
-
- public:
-  operator Testable() const {
-    return impl_.get() ? &scoped_ptr::impl_ : nullptr;
-  }
-
-  // Swap two scoped pointers.
-  void swap(scoped_ptr& p2) {
-    impl_.swap(p2.impl_);
-  }
-
-  // Release a pointer.
-  // The return value is the current pointer held by this object. If this object
-  // holds a nullptr, the return value is nullptr. After this operation, this
-  // object will hold a nullptr, and will not own the object any more.
-  element_type* release() WARN_UNUSED_RESULT {
-    return impl_.release();
-  }
-
- private:
-  // Needed to reach into |impl_| in the constructor.
-  template <typename U, typename V> friend class scoped_ptr;
-  base::internal::scoped_ptr_impl<element_type, deleter_type> impl_;
-
-  // Forbidden for API compatibility with std::unique_ptr.
-  explicit scoped_ptr(int disallow_construction_from_null);
-};
-
-template <class T, class D>
-class scoped_ptr<T[], D> {
-  DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(scoped_ptr)
-
- public:
-  // The element and deleter types.
-  using element_type = T;
-  using deleter_type = D;
-
-  // Constructor.  Defaults to initializing with nullptr.
-  scoped_ptr() : impl_(nullptr) {}
-
-  // Constructor. Stores the given array. Note that the argument's type
-  // must exactly match T*. In particular:
-  // - it cannot be a pointer to a type derived from T, because it is
-  //   inherently unsafe in the general case to access an array through a
-  //   pointer whose dynamic type does not match its static type (eg., if
-  //   T and the derived types had different sizes access would be
-  //   incorrectly calculated). Deletion is also always undefined
-  //   (C++98 [expr.delete]p3). If you're doing this, fix your code.
-  // - it cannot be const-qualified differently from T per unique_ptr spec
-  //   (http://cplusplus.github.com/LWG/lwg-active.html#2118). Users wanting
-  //   to work around this may use const_cast<const T*>().
-  explicit scoped_ptr(element_type* array) : impl_(array) {}
-
-  // Constructor.  Allows construction from a nullptr.
-  scoped_ptr(std::nullptr_t) : impl_(nullptr) {}
-
-  // Constructor.  Allows construction from a scoped_ptr rvalue.
-  scoped_ptr(scoped_ptr&& other) : impl_(&other.impl_) {}
-
-  // operator=.  Allows assignment from a scoped_ptr rvalue.
-  scoped_ptr& operator=(scoped_ptr&& rhs) {
-    impl_.TakeState(&rhs.impl_);
-    return *this;
-  }
-
-  // operator=.  Allows assignment from a nullptr. Deletes the currently owned
-  // array, if any.
-  scoped_ptr& operator=(std::nullptr_t) {
-    reset();
-    return *this;
-  }
-
-  // Reset.  Deletes the currently owned array, if any.
-  // Then takes ownership of a new object, if given.
-  void reset(element_type* array = nullptr) { impl_.reset(array); }
-
-  // Accessors to get the owned array.
-  element_type& operator[](size_t i) const {
-    assert(impl_.get() != nullptr);
-    return impl_.get()[i];
-  }
-  element_type* get() const { return impl_.get(); }
-
-  // Access to the deleter.
-  deleter_type& get_deleter() { return impl_.get_deleter(); }
-  const deleter_type& get_deleter() const { return impl_.get_deleter(); }
-
-  // Allow scoped_ptr<element_type> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
- private:
-  typedef base::internal::scoped_ptr_impl<element_type, deleter_type>
-      scoped_ptr::*Testable;
-
- public:
-  operator Testable() const {
-    return impl_.get() ? &scoped_ptr::impl_ : nullptr;
-  }
-
-  // Swap two scoped pointers.
-  void swap(scoped_ptr& p2) {
-    impl_.swap(p2.impl_);
-  }
-
-  // Release a pointer.
-  // The return value is the current pointer held by this object. If this object
-  // holds a nullptr, the return value is nullptr. After this operation, this
-  // object will hold a nullptr, and will not own the object any more.
-  element_type* release() WARN_UNUSED_RESULT {
-    return impl_.release();
-  }
-
- private:
-  // Force element_type to be a complete type.
-  enum { type_must_be_complete = sizeof(element_type) };
-
-  // Actually hold the data.
-  base::internal::scoped_ptr_impl<element_type, deleter_type> impl_;
-
-  // Disable initialization from any type other than element_type*, by
-  // providing a constructor that matches such an initialization, but is
-  // private and has no definition. This is disabled because it is not safe to
-  // call delete[] on an array whose static type does not match its dynamic
-  // type.
-  template <typename U> explicit scoped_ptr(U* array);
-  explicit scoped_ptr(int disallow_construction_from_null);
-
-  // Disable reset() from any type other than element_type*, for the same
-  // reasons as the constructor above.
-  template <typename U> void reset(U* array);
-  void reset(int disallow_reset_from_null);
-};
-
-// Free functions
-template <class T, class D>
-void swap(scoped_ptr<T, D>& p1, scoped_ptr<T, D>& p2) {
-  p1.swap(p2);
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator==(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return p1.get() == p2.get();
-}
-template <class T, class D>
-bool operator==(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return p.get() == nullptr;
-}
-template <class T, class D>
-bool operator==(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return p.get() == nullptr;
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator!=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return !(p1 == p2);
-}
-template <class T, class D>
-bool operator!=(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return !(p == nullptr);
-}
-template <class T, class D>
-bool operator!=(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return !(p == nullptr);
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator<(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return p1.get() < p2.get();
-}
-template <class T, class D>
-bool operator<(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  auto* ptr = p.get();
-  return ptr < static_cast<decltype(ptr)>(nullptr);
-}
-template <class T, class D>
-bool operator<(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  auto* ptr = p.get();
-  return static_cast<decltype(ptr)>(nullptr) < ptr;
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator>(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return p2 < p1;
-}
-template <class T, class D>
-bool operator>(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return nullptr < p;
-}
-template <class T, class D>
-bool operator>(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return p < nullptr;
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator<=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return !(p1 > p2);
-}
-template <class T, class D>
-bool operator<=(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return !(p > nullptr);
-}
-template <class T, class D>
-bool operator<=(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return !(nullptr > p);
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator>=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return !(p1 < p2);
-}
-template <class T, class D>
-bool operator>=(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return !(p < nullptr);
-}
-template <class T, class D>
-bool operator>=(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return !(nullptr < p);
-}
-
-// A function to convert T* into scoped_ptr<T>
-// Doing e.g. make_scoped_ptr(new FooBarBaz<type>(arg)) is a shorter notation
-// for scoped_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
-template <typename T>
-scoped_ptr<T> make_scoped_ptr(T* ptr) {
-  return scoped_ptr<T>(ptr);
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& out, const scoped_ptr<T>& p) {
-  return out << p.get();
-}
-
-#endif  // BASE_MEMORY_SCOPED_PTR_H_
diff --git a/base/memory/scoped_ptr_unittest.cc b/base/memory/scoped_ptr_unittest.cc
deleted file mode 100644
index 4f0e784..0000000
--- a/base/memory/scoped_ptr_unittest.cc
+++ /dev/null
@@ -1,842 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/scoped_ptr.h"
-
-#include <stddef.h>
-
-#include <sstream>
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/macros.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-// Used to test depth subtyping.
-class ConDecLoggerParent {
- public:
-  virtual ~ConDecLoggerParent() {}
-
-  virtual void SetPtr(int* ptr) = 0;
-
-  virtual int SomeMeth(int x) const = 0;
-};
-
-class ConDecLogger : public ConDecLoggerParent {
- public:
-  ConDecLogger() : ptr_(NULL) { }
-  explicit ConDecLogger(int* ptr) { SetPtr(ptr); }
-  ~ConDecLogger() override { --*ptr_; }
-
-  void SetPtr(int* ptr) override {
-    ptr_ = ptr;
-    ++*ptr_;
-  }
-
-  int SomeMeth(int x) const override { return x; }
-
- private:
-  int* ptr_;
-
-  DISALLOW_COPY_AND_ASSIGN(ConDecLogger);
-};
-
-struct CountingDeleter {
-  explicit CountingDeleter(int* count) : count_(count) {}
-  inline void operator()(double* ptr) const {
-    (*count_)++;
-  }
-  int* count_;
-};
-
-// Used to test assignment of convertible deleters.
-struct CountingDeleterChild : public CountingDeleter {
-  explicit CountingDeleterChild(int* count) : CountingDeleter(count) {}
-};
-
-class OverloadedNewAndDelete {
- public:
-  void* operator new(size_t size) {
-    g_new_count++;
-    return malloc(size);
-  }
-
-  void operator delete(void* ptr) {
-    g_delete_count++;
-    free(ptr);
-  }
-
-  static void ResetCounters() {
-    g_new_count = 0;
-    g_delete_count = 0;
-  }
-
-  static int new_count() { return g_new_count; }
-  static int delete_count() { return g_delete_count; }
-
- private:
-  static int g_new_count;
-  static int g_delete_count;
-};
-
-int OverloadedNewAndDelete::g_new_count = 0;
-int OverloadedNewAndDelete::g_delete_count = 0;
-
-scoped_ptr<ConDecLogger> PassThru(scoped_ptr<ConDecLogger> logger) {
-  return logger;
-}
-
-void GrabAndDrop(scoped_ptr<ConDecLogger> logger) {
-}
-
-// Do not delete this function!  It's existence is to test that you can
-// return a temporarily constructed version of the scoper.
-scoped_ptr<ConDecLogger> TestReturnOfType(int* constructed) {
-  return scoped_ptr<ConDecLogger>(new ConDecLogger(constructed));
-}
-
-}  // namespace
-
-TEST(ScopedPtrTest, ScopedPtr) {
-  int constructed = 0;
-
-  // Ensure size of scoped_ptr<> doesn't increase unexpectedly.
-  static_assert(sizeof(int*) >= sizeof(scoped_ptr<int>),
-                "scoped_ptr shouldn't be larger than the raw pointer");
-
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    EXPECT_EQ(10, scoper->SomeMeth(10));
-    EXPECT_EQ(10, scoper.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test reset() and release()
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoper.reset();
-    EXPECT_EQ(0, constructed);
-    EXPECT_FALSE(scoper.get());
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    ConDecLogger* take = scoper.release();
-    EXPECT_EQ(1, constructed);
-    EXPECT_FALSE(scoper.get());
-    delete take;
-    EXPECT_EQ(0, constructed);
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test swap().
-  {
-    scoped_ptr<ConDecLogger> scoper1;
-    scoped_ptr<ConDecLogger> scoper2;
-    EXPECT_TRUE(scoper1.get() == scoper2.get());
-    EXPECT_FALSE(scoper1.get() != scoper2.get());
-
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoper1.reset(logger);
-    EXPECT_EQ(logger, scoper1.get());
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-
-    scoper2.swap(scoper1);
-    EXPECT_EQ(logger, scoper2.get());
-    EXPECT_FALSE(scoper1.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, ScopedPtrDepthSubtyping) {
-  int constructed = 0;
-
-  // Test construction from a scoped_ptr to a derived class.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<ConDecLoggerParent> scoper_parent(std::move(scoper));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_parent.get());
-    EXPECT_FALSE(scoper.get());
-
-    EXPECT_EQ(10, scoper_parent->SomeMeth(10));
-    EXPECT_EQ(10, scoper_parent.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper_parent).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test assignment from a scoped_ptr to a derived class.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<ConDecLoggerParent> scoper_parent;
-    scoper_parent = std::move(scoper);
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_parent.get());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test construction of a scoped_ptr with an additional const annotation.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<const ConDecLogger> scoper_const(std::move(scoper));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_const.get());
-    EXPECT_FALSE(scoper.get());
-
-    EXPECT_EQ(10, scoper_const->SomeMeth(10));
-    EXPECT_EQ(10, scoper_const.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper_const).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test assignment to a scoped_ptr with an additional const annotation.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<const ConDecLogger> scoper_const;
-    scoper_const = std::move(scoper);
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_const.get());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test assignment to a scoped_ptr deleter of parent type.
-  {
-    // Custom deleters never touch these value.
-    double dummy_value, dummy_value2;
-    int deletes = 0;
-    int alternate_deletes = 0;
-    scoped_ptr<double, CountingDeleter> scoper(&dummy_value,
-                                               CountingDeleter(&deletes));
-    scoped_ptr<double, CountingDeleterChild> scoper_child(
-        &dummy_value2, CountingDeleterChild(&alternate_deletes));
-
-    EXPECT_TRUE(scoper);
-    EXPECT_TRUE(scoper_child);
-    EXPECT_EQ(0, deletes);
-    EXPECT_EQ(0, alternate_deletes);
-
-    // Test this compiles and correctly overwrites the deleter state.
-    scoper = std::move(scoper_child);
-    EXPECT_TRUE(scoper);
-    EXPECT_FALSE(scoper_child);
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(0, alternate_deletes);
-
-    scoper.reset();
-    EXPECT_FALSE(scoper);
-    EXPECT_FALSE(scoper_child);
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(1, alternate_deletes);
-
-    scoper_child.reset(&dummy_value);
-    EXPECT_TRUE(scoper_child);
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(1, alternate_deletes);
-    scoped_ptr<double, CountingDeleter> scoper_construct(
-        std::move(scoper_child));
-    EXPECT_TRUE(scoper_construct);
-    EXPECT_FALSE(scoper_child);
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(1, alternate_deletes);
-
-    scoper_construct.reset();
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(2, alternate_deletes);
-  }
-}
-
-TEST(ScopedPtrTest, ScopedPtrWithArray) {
-  static const int kNumLoggers = 12;
-
-  int constructed = 0;
-
-  {
-    scoped_ptr<ConDecLogger[]> scoper(new ConDecLogger[kNumLoggers]);
-    EXPECT_TRUE(scoper);
-    EXPECT_EQ(&scoper[0], scoper.get());
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-
-    EXPECT_EQ(10, scoper.get()->SomeMeth(10));
-    EXPECT_EQ(10, scoper[2].SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test reset() and release()
-  {
-    scoped_ptr<ConDecLogger[]> scoper;
-    EXPECT_FALSE(scoper.get());
-    EXPECT_FALSE(scoper.release());
-    EXPECT_FALSE(scoper.get());
-    scoper.reset();
-    EXPECT_FALSE(scoper.get());
-
-    scoper.reset(new ConDecLogger[kNumLoggers]);
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-    scoper.reset();
-    EXPECT_EQ(0, constructed);
-
-    scoper.reset(new ConDecLogger[kNumLoggers]);
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-    ConDecLogger* ptr = scoper.release();
-    EXPECT_EQ(12, constructed);
-    delete[] ptr;
-    EXPECT_EQ(0, constructed);
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test swap() and type-safe Boolean.
-  {
-    scoped_ptr<ConDecLogger[]> scoper1;
-    scoped_ptr<ConDecLogger[]> scoper2;
-    EXPECT_TRUE(scoper1.get() == scoper2.get());
-    EXPECT_FALSE(scoper1.get() != scoper2.get());
-
-    ConDecLogger* loggers = new ConDecLogger[kNumLoggers];
-    for (int i = 0; i < kNumLoggers; ++i) {
-      loggers[i].SetPtr(&constructed);
-    }
-    scoper1.reset(loggers);
-    EXPECT_TRUE(scoper1);
-    EXPECT_EQ(loggers, scoper1.get());
-    EXPECT_FALSE(scoper2);
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-
-    scoper2.swap(scoper1);
-    EXPECT_EQ(loggers, scoper2.get());
-    EXPECT_FALSE(scoper1.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  {
-    ConDecLogger* loggers = new ConDecLogger[kNumLoggers];
-    scoped_ptr<ConDecLogger[]> scoper(loggers);
-    EXPECT_TRUE(scoper);
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(kNumLoggers, constructed);
-
-    // Test moving with constructor;
-    scoped_ptr<ConDecLogger[]> scoper2(std::move(scoper));
-    EXPECT_EQ(kNumLoggers, constructed);
-
-    // Test moving with assignment;
-    scoped_ptr<ConDecLogger[]> scoper3;
-    scoper3 = std::move(scoper2);
-    EXPECT_EQ(kNumLoggers, constructed);
-    EXPECT_FALSE(scoper);
-    EXPECT_FALSE(scoper2);
-    EXPECT_TRUE(scoper3);
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, MoveBehavior) {
-  int constructed = 0;
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Test moving with constructor;
-    scoped_ptr<ConDecLogger> scoper2(std::move(scoper));
-    EXPECT_EQ(1, constructed);
-
-    // Test moving with assignment;
-    scoped_ptr<ConDecLogger> scoper3;
-    scoper3 = std::move(scoper2);
-    EXPECT_EQ(1, constructed);
-    EXPECT_FALSE(scoper.get());
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_TRUE(scoper3.get());
-  }
-
-#if !defined(OS_ANDROID) && !defined(OS_LINUX)
-  // Test uncaught Pass() does not have side effects, because Pass()
-  // is implemented by std::move().
-  // TODO(danakj): Remove this test case when we remove Pass().
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    scoped_ptr<ConDecLogger>&& rvalue = scoper.Pass();
-    // The Pass() function mimics std::move(), which does not have side-effects.
-    EXPECT_TRUE(scoper.get());
-    EXPECT_TRUE(rvalue);
-  }
-  EXPECT_EQ(0, constructed);
-#endif
-
-  // Test that passing to function which does nothing does not leak.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    GrabAndDrop(std::move(scoper));
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, ReturnTypeBehavior) {
-  int constructed = 0;
-
-  // Test that we can return a scoped_ptr.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    PassThru(std::move(scoper));
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test uncaught return type not leak.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    PassThru(std::move(scoper));
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Call TestReturnOfType() so the compiler doesn't warn for an unused
-  // function.
-  {
-    TestReturnOfType(&constructed);
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, CustomDeleter) {
-  double dummy_value;  // Custom deleter never touches this value.
-  int deletes = 0;
-  int alternate_deletes = 0;
-
-  // Normal delete support.
-  {
-    deletes = 0;
-    scoped_ptr<double, CountingDeleter> scoper(&dummy_value,
-                                               CountingDeleter(&deletes));
-    EXPECT_EQ(0, deletes);
-    EXPECT_TRUE(scoper.get());
-  }
-  EXPECT_EQ(1, deletes);
-
-  // Test reset() and release().
-  deletes = 0;
-  {
-    scoped_ptr<double, CountingDeleter> scoper(NULL,
-                                               CountingDeleter(&deletes));
-    EXPECT_FALSE(scoper.get());
-    EXPECT_FALSE(scoper.release());
-    EXPECT_FALSE(scoper.get());
-    scoper.reset();
-    EXPECT_FALSE(scoper.get());
-    EXPECT_EQ(0, deletes);
-
-    scoper.reset(&dummy_value);
-    scoper.reset();
-    EXPECT_EQ(1, deletes);
-
-    scoper.reset(&dummy_value);
-    EXPECT_EQ(&dummy_value, scoper.release());
-  }
-  EXPECT_EQ(1, deletes);
-
-  // Test get_deleter().
-  deletes = 0;
-  alternate_deletes = 0;
-  {
-    scoped_ptr<double, CountingDeleter> scoper(&dummy_value,
-                                               CountingDeleter(&deletes));
-    // Call deleter manually.
-    EXPECT_EQ(0, deletes);
-    scoper.get_deleter()(&dummy_value);
-    EXPECT_EQ(1, deletes);
-
-    // Deleter is still there after reset.
-    scoper.reset();
-    EXPECT_EQ(2, deletes);
-    scoper.get_deleter()(&dummy_value);
-    EXPECT_EQ(3, deletes);
-
-    // Deleter can be assigned into (matches C++11 unique_ptr<> spec).
-    scoper.get_deleter() = CountingDeleter(&alternate_deletes);
-    scoper.reset(&dummy_value);
-    EXPECT_EQ(0, alternate_deletes);
-
-  }
-  EXPECT_EQ(3, deletes);
-  EXPECT_EQ(1, alternate_deletes);
-
-  // Test operator= deleter support.
-  deletes = 0;
-  alternate_deletes = 0;
-  {
-    double dummy_value2;
-    scoped_ptr<double, CountingDeleter> scoper(&dummy_value,
-                                               CountingDeleter(&deletes));
-    scoped_ptr<double, CountingDeleter> scoper2(
-        &dummy_value2,
-        CountingDeleter(&alternate_deletes));
-    EXPECT_EQ(0, deletes);
-    EXPECT_EQ(0, alternate_deletes);
-
-    // Pass the second deleter through a constructor and an operator=. Then
-    // reinitialize the empty scopers to ensure that each one is deleting
-    // properly.
-    scoped_ptr<double, CountingDeleter> scoper3(std::move(scoper2));
-    scoper = std::move(scoper3);
-    EXPECT_EQ(1, deletes);
-
-    scoper2.reset(&dummy_value2);
-    scoper3.reset(&dummy_value2);
-    EXPECT_EQ(0, alternate_deletes);
-
-  }
-  EXPECT_EQ(1, deletes);
-  EXPECT_EQ(3, alternate_deletes);
-
-  // Test swap(), and type-safe Boolean.
-  {
-    scoped_ptr<double, CountingDeleter> scoper1(NULL,
-                                                CountingDeleter(&deletes));
-    scoped_ptr<double, CountingDeleter> scoper2(NULL,
-                                                CountingDeleter(&deletes));
-    EXPECT_TRUE(scoper1.get() == scoper2.get());
-    EXPECT_FALSE(scoper1.get() != scoper2.get());
-
-    scoper1.reset(&dummy_value);
-    EXPECT_TRUE(scoper1);
-    EXPECT_EQ(&dummy_value, scoper1.get());
-    EXPECT_FALSE(scoper2);
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-
-    scoper2.swap(scoper1);
-    EXPECT_EQ(&dummy_value, scoper2.get());
-    EXPECT_FALSE(scoper1.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-  }
-}
-
-// Sanity check test for overloaded new and delete operators. Does not do full
-// coverage of reset/release/move operations as that is redundant with the
-// above.
-TEST(ScopedPtrTest, OverloadedNewAndDelete) {
-  {
-    OverloadedNewAndDelete::ResetCounters();
-    scoped_ptr<OverloadedNewAndDelete> scoper(new OverloadedNewAndDelete());
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<OverloadedNewAndDelete> scoper2(std::move(scoper));
-  }
-  EXPECT_EQ(1, OverloadedNewAndDelete::delete_count());
-  EXPECT_EQ(1, OverloadedNewAndDelete::new_count());
-}
-
-scoped_ptr<int> NullIntReturn() {
-  return nullptr;
-}
-
-TEST(ScopedPtrTest, Nullptr) {
-  scoped_ptr<int> scoper1(nullptr);
-  scoped_ptr<int> scoper2(new int);
-  scoper2 = nullptr;
-  scoped_ptr<int> scoper3(NullIntReturn());
-  scoped_ptr<int> scoper4 = NullIntReturn();
-  EXPECT_EQ(nullptr, scoper1.get());
-  EXPECT_EQ(nullptr, scoper2.get());
-  EXPECT_EQ(nullptr, scoper3.get());
-  EXPECT_EQ(nullptr, scoper4.get());
-}
-
-scoped_ptr<int[]> NullIntArrayReturn() {
-  return nullptr;
-}
-
-TEST(ScopedPtrTest, NullptrArray) {
-  scoped_ptr<int[]> scoper1(nullptr);
-  scoped_ptr<int[]> scoper2(new int[3]);
-  scoper2 = nullptr;
-  scoped_ptr<int[]> scoper3(NullIntArrayReturn());
-  scoped_ptr<int[]> scoper4 = NullIntArrayReturn();
-  EXPECT_EQ(nullptr, scoper1.get());
-  EXPECT_EQ(nullptr, scoper2.get());
-  EXPECT_EQ(nullptr, scoper3.get());
-  EXPECT_EQ(nullptr, scoper4.get());
-}
-
-class Super {};
-class Sub : public Super {};
-
-scoped_ptr<Sub> SubClassReturn() {
-  return make_scoped_ptr(new Sub);
-}
-
-TEST(ScopedPtrTest, Conversion) {
-  scoped_ptr<Sub> sub1(new Sub);
-  scoped_ptr<Sub> sub2(new Sub);
-
-  // Upcast with move works.
-  scoped_ptr<Super> super1 = std::move(sub1);
-  super1 = std::move(sub2);
-
-  // Upcast with an rvalue works.
-  scoped_ptr<Super> super2 = SubClassReturn();
-  super2 = SubClassReturn();
-}
-
-// Logging a scoped_ptr<T> to an ostream shouldn't convert it to a boolean
-// value first.
-TEST(ScopedPtrTest, LoggingDoesntConvertToBoolean) {
-  scoped_ptr<int> x(new int);
-  std::stringstream s1;
-  s1 << x;
-
-  std::stringstream s2;
-  s2 << x.get();
-
-  EXPECT_EQ(s2.str(), s1.str());
-}
-
-TEST(ScopedPtrTest, ReferenceCycle) {
-  struct StructB;
-  struct StructA {
-    scoped_ptr<StructB> b;
-  };
-
-  struct StructB {
-    scoped_ptr<StructA> a;
-  };
-
-  // Create a reference cycle.
-  StructA* a = new StructA;
-  a->b.reset(new StructB);
-  a->b->a.reset(a);
-
-  // Break the cycle by calling reset(). This will cause |a| (and hence, |a->b|)
-  // to be deleted before the call to reset() returns. This tests that the
-  // implementation of scoped_ptr::reset() doesn't access |this| after it
-  // deletes the underlying pointer. This behaviour is consistent with the
-  // definition of unique_ptr::reset in C++11.
-  a->b.reset();
-
-  // Go again, but this time, break the cycle by invoking |a|'s destructor. This
-  // tests that the implementation of ~scoped_ptr doesn't infinitely recurse
-  // into the destructors of |a| and |a->b|. Note, deleting |a| instead will
-  // cause |a| to be double-free'd because |a->b| owns |a| and deletes it via
-  // its destructor.
-  a = new StructA;
-  a->b.reset(new StructB);
-  a->b->a.reset(a);
-  a->~StructA();
-}
-
-TEST(ScopedPtrTest, Operators) {
-  struct Parent {};
-  struct Child : public Parent {};
-
-  scoped_ptr<Parent> p(new Parent);
-  scoped_ptr<Parent> p2(new Parent);
-  scoped_ptr<Child> c(new Child);
-  scoped_ptr<Parent> pnull;
-
-  // Operator==.
-  EXPECT_TRUE(p == p);
-  EXPECT_FALSE(p == c);
-  EXPECT_FALSE(p == p2);
-  EXPECT_FALSE(p == pnull);
-
-  EXPECT_FALSE(p == nullptr);
-  EXPECT_FALSE(nullptr == p);
-  EXPECT_TRUE(pnull == nullptr);
-  EXPECT_TRUE(nullptr == pnull);
-
-  // Operator!=.
-  EXPECT_FALSE(p != p);
-  EXPECT_TRUE(p != c);
-  EXPECT_TRUE(p != p2);
-  EXPECT_TRUE(p != pnull);
-
-  EXPECT_TRUE(p != nullptr);
-  EXPECT_TRUE(nullptr != p);
-  EXPECT_FALSE(pnull != nullptr);
-  EXPECT_FALSE(nullptr != pnull);
-
-  // Compare two scoped_ptr<T>.
-  EXPECT_EQ(p.get() < p2.get(), p < p2);
-  EXPECT_EQ(p.get() <= p2.get(), p <= p2);
-  EXPECT_EQ(p.get() > p2.get(), p > p2);
-  EXPECT_EQ(p.get() >= p2.get(), p >= p2);
-  EXPECT_EQ(p2.get() < p.get(), p2 < p);
-  EXPECT_EQ(p2.get() <= p.get(), p2 <= p);
-  EXPECT_EQ(p2.get() > p.get(), p2 > p);
-  EXPECT_EQ(p2.get() >= p.get(), p2 >= p);
-
-  // And convertible scoped_ptr<T> and scoped_ptr<U>.
-  EXPECT_EQ(p.get() < c.get(), p < c);
-  EXPECT_EQ(p.get() <= c.get(), p <= c);
-  EXPECT_EQ(p.get() > c.get(), p > c);
-  EXPECT_EQ(p.get() >= c.get(), p >= c);
-  EXPECT_EQ(c.get() < p.get(), c < p);
-  EXPECT_EQ(c.get() <= p.get(), c <= p);
-  EXPECT_EQ(c.get() > p.get(), c > p);
-  EXPECT_EQ(c.get() >= p.get(), c >= p);
-
-  // Compare to nullptr.
-  EXPECT_TRUE(p > nullptr);
-  EXPECT_FALSE(nullptr > p);
-  EXPECT_FALSE(pnull > nullptr);
-  EXPECT_FALSE(nullptr > pnull);
-
-  EXPECT_TRUE(p >= nullptr);
-  EXPECT_FALSE(nullptr >= p);
-  EXPECT_TRUE(pnull >= nullptr);
-  EXPECT_TRUE(nullptr >= pnull);
-
-  EXPECT_FALSE(p < nullptr);
-  EXPECT_TRUE(nullptr < p);
-  EXPECT_FALSE(pnull < nullptr);
-  EXPECT_FALSE(nullptr < pnull);
-
-  EXPECT_FALSE(p <= nullptr);
-  EXPECT_TRUE(nullptr <= p);
-  EXPECT_TRUE(pnull <= nullptr);
-  EXPECT_TRUE(nullptr <= pnull);
-};
-
-TEST(ScopedPtrTest, ArrayOperators) {
-  struct Parent {};
-  struct Child : public Parent {};
-
-  scoped_ptr<Parent[]> p(new Parent[1]);
-  scoped_ptr<Parent[]> p2(new Parent[1]);
-  scoped_ptr<Child[]> c(new Child[1]);
-  scoped_ptr<Parent[]> pnull;
-
-  // Operator==.
-  EXPECT_TRUE(p == p);
-  EXPECT_FALSE(p == c);
-  EXPECT_FALSE(p == p2);
-  EXPECT_FALSE(p == pnull);
-
-  EXPECT_FALSE(p == nullptr);
-  EXPECT_FALSE(nullptr == p);
-  EXPECT_TRUE(pnull == nullptr);
-  EXPECT_TRUE(nullptr == pnull);
-
-  // Operator!=.
-  EXPECT_FALSE(p != p);
-  EXPECT_TRUE(p != c);
-  EXPECT_TRUE(p != p2);
-  EXPECT_TRUE(p != pnull);
-
-  EXPECT_TRUE(p != nullptr);
-  EXPECT_TRUE(nullptr != p);
-  EXPECT_FALSE(pnull != nullptr);
-  EXPECT_FALSE(nullptr != pnull);
-
-  // Compare two scoped_ptr<T>.
-  EXPECT_EQ(p.get() < p2.get(), p < p2);
-  EXPECT_EQ(p.get() <= p2.get(), p <= p2);
-  EXPECT_EQ(p.get() > p2.get(), p > p2);
-  EXPECT_EQ(p.get() >= p2.get(), p >= p2);
-  EXPECT_EQ(p2.get() < p.get(), p2 < p);
-  EXPECT_EQ(p2.get() <= p.get(), p2 <= p);
-  EXPECT_EQ(p2.get() > p.get(), p2 > p);
-  EXPECT_EQ(p2.get() >= p.get(), p2 >= p);
-
-  // And convertible scoped_ptr<T> and scoped_ptr<U>.
-  EXPECT_EQ(p.get() < c.get(), p < c);
-  EXPECT_EQ(p.get() <= c.get(), p <= c);
-  EXPECT_EQ(p.get() > c.get(), p > c);
-  EXPECT_EQ(p.get() >= c.get(), p >= c);
-  EXPECT_EQ(c.get() < p.get(), c < p);
-  EXPECT_EQ(c.get() <= p.get(), c <= p);
-  EXPECT_EQ(c.get() > p.get(), c > p);
-  EXPECT_EQ(c.get() >= p.get(), c >= p);
-
-  // Compare to nullptr.
-  EXPECT_TRUE(p > nullptr);
-  EXPECT_FALSE(nullptr > p);
-  EXPECT_FALSE(pnull > nullptr);
-  EXPECT_FALSE(nullptr > pnull);
-
-  EXPECT_TRUE(p >= nullptr);
-  EXPECT_FALSE(nullptr >= p);
-  EXPECT_TRUE(pnull >= nullptr);
-  EXPECT_TRUE(nullptr >= pnull);
-
-  EXPECT_FALSE(p < nullptr);
-  EXPECT_TRUE(nullptr < p);
-  EXPECT_FALSE(pnull < nullptr);
-  EXPECT_FALSE(nullptr < pnull);
-
-  EXPECT_FALSE(p <= nullptr);
-  EXPECT_TRUE(nullptr <= p);
-  EXPECT_TRUE(pnull <= nullptr);
-  EXPECT_TRUE(nullptr <= pnull);
-}
diff --git a/base/memory/scoped_ptr_unittest.nc b/base/memory/scoped_ptr_unittest.nc
deleted file mode 100644
index 10b45a1..0000000
--- a/base/memory/scoped_ptr_unittest.nc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// http://dev.chromium.org/developers/testing/no-compile-tests
-
-#include "base/memory/scoped_ptr.h"
-
-#include <utility>
-
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-
-namespace {
-
-class Parent {
-};
-
-class Child : public Parent {
-};
-
-class RefCountedClass : public base::RefCountedThreadSafe<RefCountedClass> {
-};
-
-}  // namespace
-
-#if defined(NCTEST_NO_PASS_DOWNCAST)  // [r"fatal error: no viable conversion from returned value of type 'scoped_ptr<\(anonymous namespace\)::Parent>' to function return type 'scoped_ptr<\(anonymous namespace\)::Child>'"]
-
-scoped_ptr<Child> DowncastUsingPassAs(scoped_ptr<Parent> object) {
-  return object;
-}
-
-#elif defined(NCTEST_NO_REF_COUNTED_SCOPED_PTR)  // [r"fatal error: static_assert failed \"T is a refcounted type and needs a scoped_refptr\""]
-
-// scoped_ptr<> should not work for ref-counted objects.
-void WontCompile() {
-  scoped_ptr<RefCountedClass> x;
-}
-
-#elif defined(NCTEST_NO_ARRAY_WITH_SIZE)  // [r"fatal error: static_assert failed \"scoped_ptr doesn't support array with size\""]
-
-void WontCompile() {
-  scoped_ptr<int[10]> x;
-}
-
-#elif defined(NCTEST_NO_PASS_FROM_ARRAY)  // [r"fatal error: no viable overloaded '='"]
-
-void WontCompile() {
-  scoped_ptr<int[]> a;
-  scoped_ptr<int*> b;
-  b = std::move(a);
-}
-
-#elif defined(NCTEST_NO_PASS_TO_ARRAY)  // [r"fatal error: no viable overloaded '='"]
-
-void WontCompile() {
-  scoped_ptr<int*> a;
-  scoped_ptr<int[]> b;
-  b = std::move(a);
-}
-
-#elif defined(NCTEST_NO_CONSTRUCT_FROM_ARRAY)  // [r"fatal error: no matching constructor for initialization of 'scoped_ptr<int \*>'"]
-
-void WontCompile() {
-  scoped_ptr<int[]> a;
-  scoped_ptr<int*> b(std::move(a));
-}
-
-#elif defined(NCTEST_NO_CONSTRUCT_TO_ARRAY)  // [r"fatal error: no matching constructor for initialization of 'scoped_ptr<int \[\]>'"]
-
-void WontCompile() {
-  scoped_ptr<int*> a;
-  scoped_ptr<int[]> b(std::move(a));
-}
-
-#elif defined(NCTEST_NO_CONSTRUCT_SCOPED_PTR_ARRAY_FROM_NULL)  // [r"is ambiguous"]
-
-void WontCompile() {
-  scoped_ptr<int[]> x(NULL);
-}
-
-#elif defined(NCTEST_NO_CONSTRUCT_SCOPED_PTR_ARRAY_FROM_DERIVED)  // [r"fatal error: calling a private constructor of class 'scoped_ptr<\(anonymous namespace\)::Parent \[\], std::default_delete<\(anonymous namespace\)::Parent \[\]> >'"]
-
-void WontCompile() {
-  scoped_ptr<Parent[]> x(new Child[1]);
-}
-
-#elif defined(NCTEST_NO_RESET_SCOPED_PTR_ARRAY_FROM_NULL)  // [r"is ambiguous"]
-
-void WontCompile() {
-  scoped_ptr<int[]> x;
-  x.reset(NULL);
-}
-
-#elif defined(NCTEST_NO_RESET_SCOPED_PTR_ARRAY_FROM_DERIVED)  // [r"fatal error: 'reset' is a private member of 'scoped_ptr<\(anonymous namespace\)::Parent \[\], std::default_delete<\(anonymous namespace\)::Parent \[\]> >'"]
-
-void WontCompile() {
-  scoped_ptr<Parent[]> x;
-  x.reset(new Child[1]);
-}
-
-#elif defined(NCTEST_NO_DELETER_REFERENCE)  // [r"fatal error: base specifier must name a class"]
-
-struct Deleter {
-  void operator()(int*) {}
-};
-
-// Current implementation doesn't support Deleter Reference types. Enabling
-// support would require changes to the behavior of the constructors to match
-// including the use of SFINAE to discard the type-converting constructor
-// as per C++11 20.7.1.2.1.19.
-void WontCompile() {
-  Deleter d;
-  int n;
-  scoped_ptr<int*, Deleter&> a(&n, d);
-}
-
-#endif
diff --git a/base/memory/scoped_vector.h b/base/memory/scoped_vector.h
index 6730612..f3581ea 100644
--- a/base/memory/scoped_vector.h
+++ b/base/memory/scoped_vector.h
@@ -7,11 +7,11 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/move.h"
+#include "base/macros.h"
 #include "base/stl_util.h"
 
 // ScopedVector wraps a vector deleting the elements from its
@@ -21,8 +21,6 @@
 // we have support for moveable types inside containers).
 template <class T>
 class ScopedVector {
-  MOVE_ONLY_TYPE_FOR_CPP_03(ScopedVector)
-
  public:
   typedef typename std::vector<T*>::allocator_type allocator_type;
   typedef typename std::vector<T*>::size_type size_type;
@@ -69,7 +67,7 @@
   reference back() { return v_.back(); }
 
   void push_back(T* elem) { v_.push_back(elem); }
-  void push_back(scoped_ptr<T> elem) { v_.push_back(elem.release()); }
+  void push_back(std::unique_ptr<T> elem) { v_.push_back(elem.release()); }
 
   void pop_back() {
     DCHECK(!empty());
@@ -110,7 +108,7 @@
     return v_.insert(position, x);
   }
 
-  iterator insert(iterator position, scoped_ptr<T> x) {
+  iterator insert(iterator position, std::unique_ptr<T> x) {
     return v_.insert(position, x.release());
   }
 
@@ -142,6 +140,8 @@
 
  private:
   std::vector<T*> v_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedVector);
 };
 
 #endif  // BASE_MEMORY_SCOPED_VECTOR_H_
diff --git a/base/memory/scoped_vector_unittest.cc b/base/memory/scoped_vector_unittest.cc
index 8638ece..ea3dcdc 100644
--- a/base/memory/scoped_vector_unittest.cc
+++ b/base/memory/scoped_vector_unittest.cc
@@ -4,12 +4,12 @@
 
 #include "base/memory/scoped_vector.h"
 
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
 #include "base/callback.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace {
@@ -112,7 +112,7 @@
 
  private:
   LifeCycleState life_cycle_state_;
-  scoped_ptr<LifeCycleObject> constructed_life_cycle_object_;
+  std::unique_ptr<LifeCycleObject> constructed_life_cycle_object_;
 
   DISALLOW_COPY_AND_ASSIGN(LifeCycleWatcher);
 };
@@ -325,7 +325,7 @@
 // Assertions for push_back(scoped_ptr).
 TEST(ScopedVectorTest, PushBackScopedPtr) {
   int delete_counter = 0;
-  scoped_ptr<DeleteCounter> elem(new DeleteCounter(&delete_counter));
+  std::unique_ptr<DeleteCounter> elem(new DeleteCounter(&delete_counter));
   EXPECT_EQ(0, delete_counter);
   {
     ScopedVector<DeleteCounter> v;
diff --git a/base/memory/shared_memory.h b/base/memory/shared_memory.h
index a94b399..e1c9fa7 100644
--- a/base/memory/shared_memory.h
+++ b/base/memory/shared_memory.h
@@ -24,6 +24,10 @@
 #include "base/files/scoped_file.h"
 #endif
 
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
 namespace base {
 
 class FilePath;
@@ -32,10 +36,7 @@
 struct BASE_EXPORT SharedMemoryCreateOptions {
   SharedMemoryCreateOptions();
 
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The type of OS primitive that should back the SharedMemory object.
-  SharedMemoryHandle::Type type;
-#else
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
   // DEPRECATED (crbug.com/345734):
   // If NULL, the object is anonymous.  This pointer is owned by the caller
   // and must live through the call to Create().
@@ -47,7 +48,7 @@
   // shared memory must not exist.  This flag is meaningless unless
   // name_deprecated is non-NULL.
   bool open_existing_deprecated;
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
 
   // Size of the shared memory object to be created.
   // When opening an existing object, this has no effect.
@@ -82,15 +83,6 @@
   // that |read_only| matches the permissions of the handle.
   SharedMemory(const SharedMemoryHandle& handle, bool read_only);
 
-#if defined(OS_WIN)
-  // Create a new SharedMemory object from an existing, open
-  // shared memory file that was created by a remote process and not shared
-  // to the current process.
-  SharedMemory(const SharedMemoryHandle& handle,
-               bool read_only,
-               ProcessHandle process);
-#endif
-
   // Closes any open files.
   ~SharedMemory();
 
@@ -111,7 +103,7 @@
   // The caller is responsible for destroying the duplicated OS primitive.
   static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
 
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
   // This method requires that the SharedMemoryHandle is backed by a POSIX fd.
   static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
 #endif
@@ -132,16 +124,6 @@
   // Returns true on success and false on failure.
   bool CreateAndMapAnonymous(size_t size);
 
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // These two methods are analogs of CreateAndMapAnonymous and CreateAnonymous
-  // that force the underlying OS primitive to be a POSIX fd. Do not add new
-  // uses of these methods unless absolutely necessary, since constructing a
-  // fd-backed SharedMemory object frequently takes 100ms+.
-  // http://crbug.com/466437.
-  bool CreateAndMapAnonymousPosix(size_t size);
-  bool CreateAnonymousPosix(size_t size);
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
-
   // Creates an anonymous shared memory segment of size size.
   // Returns true on success and false on failure.
   bool CreateAnonymous(size_t size) {
@@ -266,12 +248,11 @@
   }
 
  private:
-#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID)
+#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
+    !(defined(OS_MACOSX) && !defined(OS_IOS))
   bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly);
-#if !(defined(OS_MACOSX) && !defined(OS_IOS))
   bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
 #endif
-#endif  // defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID)
   enum ShareMode {
     SHARE_READONLY,
     SHARE_CURRENT_MODE,
@@ -286,16 +267,10 @@
   // before being mapped.
   bool external_section_;
   std::wstring       name_;
-  HANDLE             mapped_file_;
+  win::ScopedHandle  mapped_file_;
 #elif defined(OS_MACOSX) && !defined(OS_IOS)
   // The OS primitive that backs the shared memory region.
   SharedMemoryHandle shm_;
-
-  // The mechanism by which the memory is mapped. Only valid if |memory_| is not
-  // |nullptr|.
-  SharedMemoryHandle::Type mapped_memory_mechanism_;
-
-  int readonly_mapped_file_;
 #elif defined(OS_POSIX)
   int                mapped_file_;
   int                readonly_mapped_file_;
diff --git a/base/memory/shared_memory_android.cc b/base/memory/shared_memory_android.cc
new file mode 100644
index 0000000..5ac6776
--- /dev/null
+++ b/base/memory/shared_memory_android.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <stddef.h>
+#include <sys/mman.h>
+
+#include "base/logging.h"
+
+#if defined(__ANDROID__)
+#include <cutils/ashmem.h>
+#else
+#include "third_party/ashmem/ashmem.h"
+#endif
+
+namespace base {
+
+// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
+// will automatically pin the region. We never explicitly call pin/unpin. When
+// all the file descriptors from different processes associated with the region
+// are closed, the memory buffer will go away.
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  DCHECK_EQ(-1, mapped_file_ );
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  // "name" is just a label in ashmem. It is visible in /proc/pid/maps.
+  mapped_file_ = ashmem_create_region(
+      options.name_deprecated == NULL ? "" : options.name_deprecated->c_str(),
+      options.size);
+  if (-1 == mapped_file_) {
+    DLOG(ERROR) << "Shared memory creation failed";
+    return false;
+  }
+
+  int err = ashmem_set_prot_region(mapped_file_,
+                                   PROT_READ | PROT_WRITE | PROT_EXEC);
+  if (err < 0) {
+    DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
+    return false;
+  }
+
+  // Android doesn't appear to have a way to drop write access on an ashmem
+  // segment for a single descriptor.  http://crbug.com/320865
+  readonly_mapped_file_ = dup(mapped_file_);
+  if (-1 == readonly_mapped_file_) {
+    DPLOG(ERROR) << "dup() failed";
+    return false;
+  }
+
+  requested_size_ = options.size;
+
+  return true;
+}
+
+bool SharedMemory::Delete(const std::string&) {
+  // Like on Windows, this is intentionally returning true as ashmem will
+  // automatically releases the resource when all FDs on it are closed.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string&, bool /*read_only*/) {
+  // ashmem doesn't support name mapping
+  NOTIMPLEMENTED();
+  return false;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_handle.h b/base/memory/shared_memory_handle.h
new file mode 100644
index 0000000..8eff26b
--- /dev/null
+++ b/base/memory/shared_memory_handle.h
@@ -0,0 +1,164 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+#define BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+
+#include <stddef.h>
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/process/process_handle.h"
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#endif
+
+namespace base {
+
+class Pickle;
+
+// SharedMemoryHandle is a platform specific type which represents
+// the underlying OS handle to a shared memory segment.
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+typedef FileDescriptor SharedMemoryHandle;
+#elif defined(OS_WIN)
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+  // The default constructor returns an invalid SharedMemoryHandle.
+  SharedMemoryHandle();
+  SharedMemoryHandle(HANDLE h, base::ProcessId pid);
+
+  // Standard copy constructor. The new instance shares the underlying OS
+  // primitives.
+  SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+  // Standard assignment operator. The updated instance shares the underlying
+  // OS primitives.
+  SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+  // Comparison operators.
+  bool operator==(const SharedMemoryHandle& handle) const;
+  bool operator!=(const SharedMemoryHandle& handle) const;
+
+  // Closes the underlying OS resources.
+  void Close() const;
+
+  // Whether the underlying OS primitive is valid.
+  bool IsValid() const;
+
+  // Whether |pid_| is the same as the current process's id.
+  bool BelongsToCurrentProcess() const;
+
+  // Whether handle_ needs to be duplicated into the destination process when
+  // an instance of this class is passed over a Chrome IPC channel.
+  bool NeedsBrokering() const;
+
+  void SetOwnershipPassesToIPC(bool ownership_passes);
+  bool OwnershipPassesToIPC() const;
+
+  HANDLE GetHandle() const;
+  base::ProcessId GetPID() const;
+
+ private:
+  HANDLE handle_;
+
+  // The process in which |handle_| is valid and can be used. If |handle_| is
+  // invalid, this will be kNullProcessId.
+  base::ProcessId pid_;
+
+  // Whether passing this object as a parameter to an IPC message passes
+  // ownership of |handle_| to the IPC stack. This is meant to mimic the
+  // behavior of the |auto_close| parameter of FileDescriptor. This member only
+  // affects attachment-brokered SharedMemoryHandles.
+  // Defaults to |false|.
+  bool ownership_passes_to_ipc_;
+};
+#else
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+  // The default constructor returns an invalid SharedMemoryHandle.
+  SharedMemoryHandle();
+
+  // Makes a Mach-based SharedMemoryHandle of the given size. On error,
+  // subsequent calls to IsValid() return false.
+  explicit SharedMemoryHandle(mach_vm_size_t size);
+
+  // Makes a Mach-based SharedMemoryHandle from |memory_object|, a named entry
+  // in the task with process id |pid|. The memory region has size |size|.
+  SharedMemoryHandle(mach_port_t memory_object,
+                     mach_vm_size_t size,
+                     base::ProcessId pid);
+
+  // Standard copy constructor. The new instance shares the underlying OS
+  // primitives.
+  SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+  // Standard assignment operator. The updated instance shares the underlying
+  // OS primitives.
+  SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+  // Duplicates the underlying OS resources.
+  SharedMemoryHandle Duplicate() const;
+
+  // Comparison operators.
+  bool operator==(const SharedMemoryHandle& handle) const;
+  bool operator!=(const SharedMemoryHandle& handle) const;
+
+  // Whether the underlying OS primitive is valid. Once the SharedMemoryHandle
+  // is backed by a valid OS primitive, it becomes immutable.
+  bool IsValid() const;
+
+  // Exposed so that the SharedMemoryHandle can be transported between
+  // processes.
+  mach_port_t GetMemoryObject() const;
+
+  // Returns false on a failure to determine the size. On success, populates the
+  // output variable |size|. Sets |size| to 0 if the handle is invalid.
+  bool GetSize(size_t* size) const;
+
+  // The SharedMemoryHandle must be valid.
+  // Returns whether the SharedMemoryHandle was successfully mapped into memory.
+  // On success, |memory| is an output variable that contains the start of the
+  // mapped memory.
+  bool MapAt(off_t offset, size_t bytes, void** memory, bool read_only);
+
+  // Closes the underlying OS primitive.
+  void Close() const;
+
+  void SetOwnershipPassesToIPC(bool ownership_passes);
+  bool OwnershipPassesToIPC() const;
+
+ private:
+  // Shared code between copy constructor and operator=.
+  void CopyRelevantData(const SharedMemoryHandle& handle);
+
+  mach_port_t memory_object_ = MACH_PORT_NULL;
+
+  // The size of the shared memory region. Only relevant if |memory_object_|
+  // is not |MACH_PORT_NULL|.
+  mach_vm_size_t size_ = 0;
+
+  // The pid of the process in which |memory_object_| is usable. Only
+  // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+  base::ProcessId pid_ = 0;
+
+  // Whether passing this object as a parameter to an IPC message passes
+  // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+  // the behavior of the |auto_close| parameter of FileDescriptor.
+  // Defaults to |false|.
+  bool ownership_passes_to_ipc_ = false;
+};
+#endif
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
diff --git a/base/memory/shared_memory_handle_mac.cc b/base/memory/shared_memory_handle_mac.cc
new file mode 100644
index 0000000..ad470be
--- /dev/null
+++ b/base/memory/shared_memory_handle_mac.cc
@@ -0,0 +1,146 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <mach/mach_vm.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/mac/mac_util.h"
+#include "base/posix/eintr_wrapper.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
+  mach_port_t named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(),
+      &size,
+      0,  // Address.
+      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+      &named_right,
+      MACH_PORT_NULL);  // Parent handle.
+  if (kr != KERN_SUCCESS) {
+    memory_object_ = MACH_PORT_NULL;
+    return;
+  }
+
+  memory_object_ = named_right;
+  size_ = size;
+  pid_ = GetCurrentProcId();
+  ownership_passes_to_ipc_ = false;
+}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
+                                       mach_vm_size_t size,
+                                       base::ProcessId pid)
+    : memory_object_(memory_object),
+      size_(size),
+      pid_(pid),
+      ownership_passes_to_ipc_(false) {}
+
+SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle) {
+  CopyRelevantData(handle);
+}
+
+SharedMemoryHandle& SharedMemoryHandle::operator=(
+    const SharedMemoryHandle& handle) {
+  if (this == &handle)
+    return *this;
+
+  CopyRelevantData(handle);
+  return *this;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  if (!IsValid())
+    return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
+
+  // Increment the ref count.
+  kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+                                        MACH_PORT_RIGHT_SEND, 1);
+  DCHECK_EQ(kr, KERN_SUCCESS);
+  SharedMemoryHandle handle(*this);
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+bool SharedMemoryHandle::operator==(const SharedMemoryHandle& handle) const {
+  if (!IsValid() && !handle.IsValid())
+    return true;
+
+  return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
+         pid_ == handle.pid_;
+}
+
+bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
+  return !(*this == handle);
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  return memory_object_ != MACH_PORT_NULL;
+}
+
+mach_port_t SharedMemoryHandle::GetMemoryObject() const {
+  return memory_object_;
+}
+
+bool SharedMemoryHandle::GetSize(size_t* size) const {
+  if (!IsValid()) {
+    *size = 0;
+    return true;
+  }
+
+  *size = size_;
+  return true;
+}
+
+bool SharedMemoryHandle::MapAt(off_t offset,
+                               size_t bytes,
+                               void** memory,
+                               bool read_only) {
+  DCHECK(IsValid());
+  DCHECK_EQ(pid_, GetCurrentProcId());
+  kern_return_t kr = mach_vm_map(
+      mach_task_self(),
+      reinterpret_cast<mach_vm_address_t*>(memory),  // Output parameter
+      bytes,
+      0,  // Alignment mask
+      VM_FLAGS_ANYWHERE, memory_object_, offset,
+      FALSE,                                           // Copy
+      VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE),  // Current protection
+      VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK,  // Maximum protection
+      VM_INHERIT_NONE);
+  return kr == KERN_SUCCESS;
+}
+
+void SharedMemoryHandle::Close() const {
+  if (!IsValid())
+    return;
+
+  kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+  if (kr != KERN_SUCCESS)
+    DPLOG(ERROR) << "Error deallocating mach port: " << kr;
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  ownership_passes_to_ipc_ = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return ownership_passes_to_ipc_;
+}
+
+void SharedMemoryHandle::CopyRelevantData(const SharedMemoryHandle& handle) {
+  memory_object_ = handle.memory_object_;
+  size_ = handle.size_;
+  pid_ = handle.pid_;
+  ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_mac.cc b/base/memory/shared_memory_mac.cc
new file mode 100644
index 0000000..d15c632
--- /dev/null
+++ b/base/memory/shared_memory_mac.cc
@@ -0,0 +1,218 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <mach/mach_vm.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/scoped_mach_vm.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process_metrics.h"
+#include "base/profiler/scoped_tracker.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace {
+
+// Returns whether the operation succeeded.
+// |new_handle| is an output variable, populated on success. The caller takes
+// ownership of the underlying memory object.
+// |handle| is the handle to copy.
+// If |handle| is already mapped, |mapped_addr| is its mapped location.
+// Otherwise, |mapped_addr| should be |nullptr|.
+bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
+                                        SharedMemoryHandle handle,
+                                        void* mapped_addr) {
+  if (!handle.IsValid())
+    return false;
+
+  size_t size;
+  CHECK(handle.GetSize(&size));
+
+  // Map if necessary.
+  void* temp_addr = mapped_addr;
+  base::mac::ScopedMachVM scoper;
+  if (!temp_addr) {
+    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+    kern_return_t kr = mach_vm_map(
+        mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
+        size, 0, VM_FLAGS_ANYWHERE, handle.GetMemoryObject(), 0, FALSE,
+        VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+    if (kr != KERN_SUCCESS)
+      return false;
+    scoper.reset(reinterpret_cast<vm_address_t>(temp_addr),
+                 mach_vm_round_page(size));
+  }
+
+  // Make new memory object.
+  mach_port_t named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), reinterpret_cast<memory_object_size_t*>(&size),
+      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+      &named_right, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS)
+    return false;
+
+  *new_handle = SharedMemoryHandle(named_right, size, base::GetCurrentProcId());
+  return true;
+}
+
+}  // namespace
+
+SharedMemoryCreateOptions::SharedMemoryCreateOptions()
+    : size(0),
+      executable(false),
+      share_read_only(false) {}
+
+SharedMemory::SharedMemory()
+    : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : shm_(handle),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(read_only),
+      requested_size_(0) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+SharedMemoryHandle SharedMemory::NULLHandle() {
+  return SharedMemoryHandle();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  // This should be effectively unlimited on OS X.
+  return 10000;
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+// static
+bool SharedMemory::GetSizeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t* size) {
+  return handle.GetSize(size);
+}
+
+// Chromium mostly only uses the unique/private shmem as specified by
+// "name == L"". The exception is in the StatsTable.
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+  // is fixed.
+  tracked_objects::ScopedTracker tracking_profile1(
+      FROM_HERE_WITH_EXPLICIT_FUNCTION(
+          "466437 SharedMemory::Create::Start"));
+  DCHECK(!shm_.IsValid());
+  if (options.size == 0) return false;
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  shm_ = SharedMemoryHandle(options.size);
+  requested_size_ = options.size;
+  return shm_.IsValid();
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+  if (memory_)
+    return false;
+
+  bool success = shm_.MapAt(offset, bytes, &memory_, read_only_);
+  if (success) {
+    mapped_size_ = bytes;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+                      (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  } else {
+    memory_ = NULL;
+  }
+
+  return success;
+}
+
+bool SharedMemory::Unmap() {
+  if (memory_ == NULL)
+    return false;
+
+  mach_vm_deallocate(mach_task_self(),
+                     reinterpret_cast<mach_vm_address_t>(memory_),
+                     mapped_size_);
+  memory_ = NULL;
+  mapped_size_ = 0;
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+void SharedMemory::Close() {
+  shm_.Close();
+  shm_ = SharedMemoryHandle();
+}
+
+bool SharedMemory::ShareToProcessCommon(ProcessHandle /*process*/,
+                                        SharedMemoryHandle* new_handle,
+                                        bool close_self,
+                                        ShareMode share_mode) {
+  DCHECK(shm_.IsValid());
+
+  bool success = false;
+  switch (share_mode) {
+    case SHARE_CURRENT_MODE:
+      *new_handle = shm_.Duplicate();
+      success = true;
+      break;
+    case SHARE_READONLY:
+      success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
+      break;
+  }
+
+  if (success)
+    new_handle->SetOwnershipPassesToIPC(true);
+
+  if (close_self) {
+    Unmap();
+    Close();
+  }
+
+  return success;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_mac_unittest.cc b/base/memory/shared_memory_mac_unittest.cc
new file mode 100644
index 0000000..c7d20ec
--- /dev/null
+++ b/base/memory/shared_memory_mac_unittest.cc
@@ -0,0 +1,459 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <servers/bootstrap.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/command_line.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+
+// Gets the current and maximum protection levels of the memory region.
+// Returns whether the operation was successful.
+// |current| and |max| are output variables only populated on success.
+bool GetProtections(void* address, size_t size, int* current, int* max) {
+  vm_region_info_t region_info;
+  mach_vm_address_t mem_address = reinterpret_cast<mach_vm_address_t>(address);
+  mach_vm_size_t mem_size = size;
+  vm_region_basic_info_64 basic_info;
+
+  region_info = reinterpret_cast<vm_region_recurse_info_t>(&basic_info);
+  vm_region_flavor_t flavor = VM_REGION_BASIC_INFO_64;
+  memory_object_name_t memory_object;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+
+  kern_return_t kr =
+      mach_vm_region(mach_task_self(), &mem_address, &mem_size, flavor,
+                     region_info, &count, &memory_object);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "Failed to get region info.";
+    return false;
+  }
+
+  *current = basic_info.protection;
+  *max = basic_info.max_protection;
+  return true;
+}
+
+// Creates a new SharedMemory with the given |size|, filled with 'a'.
+std::unique_ptr<SharedMemory> CreateSharedMemory(int size) {
+  SharedMemoryHandle shm(size);
+  if (!shm.IsValid()) {
+    LOG(ERROR) << "Failed to make SharedMemoryHandle";
+    return nullptr;
+  }
+  std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  shared_memory->Map(size);
+  memset(shared_memory->memory(), 'a', size);
+  return shared_memory;
+}
+
+static const std::string g_service_switch_name = "service_name";
+
+// Structs used to pass a mach port from client to server.
+struct MachSendPortMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+};
+struct MachReceivePortMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+  mach_msg_trailer_t trailer;
+};
+
+// Makes the current process into a Mach Server with the given |service_name|.
+mach_port_t BecomeMachServer(const char* service_name) {
+  mach_port_t port;
+  kern_return_t kr = bootstrap_check_in(bootstrap_port, service_name, &port);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "BecomeMachServer";
+  return port;
+}
+
+// Returns the mach port for the Mach Server with the given |service_name|.
+mach_port_t LookupServer(const char* service_name) {
+  mach_port_t server_port;
+  kern_return_t kr =
+      bootstrap_look_up(bootstrap_port, service_name, &server_port);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "LookupServer";
+  return server_port;
+}
+
+mach_port_t MakeReceivingPort() {
+  mach_port_t client_port;
+  kern_return_t kr =
+      mach_port_allocate(mach_task_self(),         // our task is acquiring
+                         MACH_PORT_RIGHT_RECEIVE,  // a new receive right
+                         &client_port);            // with this name
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "MakeReceivingPort";
+  return client_port;
+}
+
+// Blocks until a mach message is sent to |port_to_listen_on|. This mach
+// message must contain a mach port. Returns that mach port.
+mach_port_t ReceiveMachPort(mach_port_t port_to_listen_on) {
+  MachReceivePortMessage recv_msg;
+  mach_msg_header_t* recv_hdr = &(recv_msg.header);
+  recv_hdr->msgh_local_port = port_to_listen_on;
+  recv_hdr->msgh_size = sizeof(recv_msg);
+  kern_return_t kr =
+      mach_msg(recv_hdr,               // message buffer
+               MACH_RCV_MSG,           // option indicating service
+               0,                      // send size
+               recv_hdr->msgh_size,    // size of header + body
+               port_to_listen_on,      // receive name
+               MACH_MSG_TIMEOUT_NONE,  // no timeout, wait forever
+               MACH_PORT_NULL);        // no notification port
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "ReceiveMachPort";
+  mach_port_t other_task_port = recv_msg.data.name;
+  return other_task_port;
+}
+
+// Passes a copy of the send right of |port_to_send| to |receiving_port|.
+void SendMachPort(mach_port_t receiving_port,
+                  mach_port_t port_to_send,
+                  int disposition) {
+  MachSendPortMessage send_msg;
+  mach_msg_header_t* send_hdr;
+  send_hdr = &(send_msg.header);
+  send_hdr->msgh_bits =
+      MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) | MACH_MSGH_BITS_COMPLEX;
+  send_hdr->msgh_size = sizeof(send_msg);
+  send_hdr->msgh_remote_port = receiving_port;
+  send_hdr->msgh_local_port = MACH_PORT_NULL;
+  send_hdr->msgh_reserved = 0;
+  send_hdr->msgh_id = 0;
+  send_msg.body.msgh_descriptor_count = 1;
+  send_msg.data.name = port_to_send;
+  send_msg.data.disposition = disposition;
+  send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+  int kr = mach_msg(send_hdr,               // message buffer
+                    MACH_SEND_MSG,          // option indicating send
+                    send_hdr->msgh_size,    // size of header + body
+                    0,                      // receive limit
+                    MACH_PORT_NULL,         // receive name
+                    MACH_MSG_TIMEOUT_NONE,  // no timeout, wait forever
+                    MACH_PORT_NULL);        // no notification port
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "SendMachPort";
+}
+
+std::string CreateRandomServiceName() {
+  return StringPrintf("SharedMemoryMacMultiProcessTest.%llu", RandUint64());
+}
+
+// Sets up the mach communication ports with the server. Returns a port to which
+// the server will send mach objects.
+mach_port_t CommonChildProcessSetUp() {
+  CommandLine cmd_line = *CommandLine::ForCurrentProcess();
+  std::string service_name =
+      cmd_line.GetSwitchValueASCII(g_service_switch_name);
+  mac::ScopedMachSendRight server_port(LookupServer(service_name.c_str()));
+  mach_port_t client_port = MakeReceivingPort();
+
+  // Send the port that this process is listening on to the server.
+  SendMachPort(server_port.get(), client_port, MACH_MSG_TYPE_MAKE_SEND);
+  return client_port;
+}
+
+// The number of active names in the current task's port name space.
+mach_msg_type_number_t GetActiveNameCount() {
+  mach_port_name_array_t name_array;
+  mach_msg_type_number_t names_count;
+  mach_port_type_array_t type_array;
+  mach_msg_type_number_t types_count;
+  kern_return_t kr = mach_port_names(mach_task_self(), &name_array,
+                                     &names_count, &type_array, &types_count);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "GetActiveNameCount";
+  return names_count;
+}
+
+}  // namespace
+
+class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
+ public:
+  SharedMemoryMacMultiProcessTest() {}
+
+  CommandLine MakeCmdLine(const std::string& procname) override {
+    CommandLine command_line = MultiProcessTest::MakeCmdLine(procname);
+    // Pass the service name to the child process.
+    command_line.AppendSwitchASCII(g_service_switch_name, service_name_);
+    return command_line;
+  }
+
+  void SetUpChild(const std::string& name) {
+    // Make a random service name so that this test doesn't conflict with other
+    // similar tests.
+    service_name_ = CreateRandomServiceName();
+    server_port_.reset(BecomeMachServer(service_name_.c_str()));
+    child_process_ = SpawnChild(name);
+    client_port_.reset(ReceiveMachPort(server_port_.get()));
+  }
+
+  static const int s_memory_size = 99999;
+
+ protected:
+  std::string service_name_;
+
+  // A port on which the main process listens for mach messages from the child
+  // process.
+  mac::ScopedMachReceiveRight server_port_;
+
+  // A port on which the child process listens for mach messages from the main
+  // process.
+  mac::ScopedMachSendRight client_port_;
+
+  base::Process child_process_;
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
+};
+
+// Tests that content written to shared memory in the server process can be read
+// by the child process.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
+  SetUpChild("MachBasedSharedMemoryClient");
+
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  // Send the underlying memory object to the client process.
+  SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
+               MACH_MSG_TYPE_COPY_SEND);
+  int rv = -1;
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryClient) {
+  mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+  // The next mach port should be for a memory object.
+  mach_port_t memory_object = ReceiveMachPort(client_port.get());
+  SharedMemoryHandle shm(memory_object,
+                         SharedMemoryMacMultiProcessTest::s_memory_size,
+                         GetCurrentProcId());
+  SharedMemory shared_memory(shm, false);
+  shared_memory.Map(SharedMemoryMacMultiProcessTest::s_memory_size);
+  const char* start = static_cast<const char*>(shared_memory.memory());
+  for (int i = 0; i < SharedMemoryMacMultiProcessTest::s_memory_size; ++i) {
+    DCHECK_EQ(start[i], 'a');
+  }
+  return 0;
+}
+
+// Tests that mapping shared memory with an offset works correctly.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
+  SetUpChild("MachBasedSharedMemoryWithOffsetClient");
+
+  SharedMemoryHandle shm(s_memory_size);
+  ASSERT_TRUE(shm.IsValid());
+  SharedMemory shared_memory(shm, false);
+  shared_memory.Map(s_memory_size);
+
+  size_t page_size = SysInfo::VMAllocationGranularity();
+  char* start = static_cast<char*>(shared_memory.memory());
+  memset(start, 'a', page_size);
+  memset(start + page_size, 'b', page_size);
+  memset(start + 2 * page_size, 'c', page_size);
+
+  // Send the underlying memory object to the client process.
+  SendMachPort(
+      client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
+  int rv = -1;
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryWithOffsetClient) {
+  mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+  // The next mach port should be for a memory object.
+  mach_port_t memory_object = ReceiveMachPort(client_port.get());
+  SharedMemoryHandle shm(memory_object,
+                         SharedMemoryMacMultiProcessTest::s_memory_size,
+                         GetCurrentProcId());
+  SharedMemory shared_memory(shm, false);
+  size_t page_size = SysInfo::VMAllocationGranularity();
+  shared_memory.MapAt(page_size, 2 * page_size);
+  const char* start = static_cast<const char*>(shared_memory.memory());
+  for (size_t i = 0; i < page_size; ++i) {
+    DCHECK_EQ(start[i], 'b');
+  }
+  for (size_t i = page_size; i < 2 * page_size; ++i) {
+    DCHECK_EQ(start[i], 'c');
+  }
+  return 0;
+}
+
+// Tests that duplication and closing has the right effect on Mach reference
+// counts.
+TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicateAndClose) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  // Making a new SharedMemoryHandle increments the name count.
+  SharedMemoryHandle shm(s_memory_size);
+  ASSERT_TRUE(shm.IsValid());
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Duplicating the SharedMemoryHandle increments the ref count, but doesn't
+  // make a new name.
+  shm.Duplicate();
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Closing the SharedMemoryHandle decrements the ref count. The first time has
+  // no effect.
+  shm.Close();
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Closing the SharedMemoryHandle decrements the ref count. The second time
+  // destroys the port.
+  shm.Close();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that Mach shared memory can be mapped and unmapped.
+TEST_F(SharedMemoryMacMultiProcessTest, MachUnmapMap) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  std::unique_ptr<SharedMemory> shared_memory =
+      CreateSharedMemory(s_memory_size);
+  ASSERT_TRUE(shared_memory->Unmap());
+  ASSERT_TRUE(shared_memory->Map(s_memory_size));
+  shared_memory.reset();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that passing a SharedMemoryHandle to a SharedMemory object also passes
+// ownership, and that destroying the SharedMemory closes the SharedMemoryHandle
+// as well.
+TEST_F(SharedMemoryMacMultiProcessTest, MachSharedMemoryTakesOwnership) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  // Making a new SharedMemoryHandle increments the name count.
+  SharedMemoryHandle shm(s_memory_size);
+  ASSERT_TRUE(shm.IsValid());
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Name count doesn't change when mapping the memory.
+  std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  shared_memory->Map(s_memory_size);
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Destroying the SharedMemory object frees the resource.
+  shared_memory.reset();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the read-only flag works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadOnly) {
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
+  ASSERT_TRUE(shm2.IsValid());
+  SharedMemory shared_memory2(shm2, true);
+  shared_memory2.Map(s_memory_size);
+  ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that the method ShareToProcess() works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcess) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  {
+    std::unique_ptr<SharedMemory> shared_memory(
+        CreateSharedMemory(s_memory_size));
+
+    SharedMemoryHandle shm2;
+    ASSERT_TRUE(shared_memory->ShareToProcess(GetCurrentProcId(), &shm2));
+    ASSERT_TRUE(shm2.IsValid());
+    SharedMemory shared_memory2(shm2, true);
+    shared_memory2.Map(s_memory_size);
+
+    ASSERT_EQ(0, memcmp(shared_memory->memory(), shared_memory2.memory(),
+                        s_memory_size));
+  }
+
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the method ShareReadOnlyToProcess() creates a memory object that
+// is read only.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcessReadonly) {
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  // Check the protection levels.
+  int current_prot, max_prot;
+  ASSERT_TRUE(GetProtections(shared_memory->memory(),
+                             shared_memory->mapped_size(), &current_prot,
+                             &max_prot));
+  ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, current_prot);
+  ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, max_prot);
+
+  // Make a new memory object.
+  SharedMemoryHandle shm2;
+  ASSERT_TRUE(shared_memory->ShareReadOnlyToProcess(GetCurrentProcId(), &shm2));
+  ASSERT_TRUE(shm2.IsValid());
+
+  // Mapping with |readonly| set to |false| should fail.
+  SharedMemory shared_memory2(shm2, false);
+  shared_memory2.Map(s_memory_size);
+  ASSERT_EQ(nullptr, shared_memory2.memory());
+
+  // Now trying mapping with |readonly| set to |true|.
+  SharedMemory shared_memory3(shm2.Duplicate(), true);
+  shared_memory3.Map(s_memory_size);
+  ASSERT_NE(nullptr, shared_memory3.memory());
+
+  // Check the protection levels.
+  ASSERT_TRUE(GetProtections(shared_memory3.memory(),
+                             shared_memory3.mapped_size(), &current_prot,
+                             &max_prot));
+  ASSERT_EQ(VM_PROT_READ, current_prot);
+  ASSERT_EQ(VM_PROT_READ, max_prot);
+
+  // The memory should still be readonly, since the underlying memory object
+  // is readonly.
+  ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that the method ShareReadOnlyToProcess() doesn't leak.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcessReadonlyLeak) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  {
+    std::unique_ptr<SharedMemory> shared_memory(
+        CreateSharedMemory(s_memory_size));
+
+    SharedMemoryHandle shm2;
+    ASSERT_TRUE(
+        shared_memory->ShareReadOnlyToProcess(GetCurrentProcId(), &shm2));
+    ASSERT_TRUE(shm2.IsValid());
+
+    // Intentionally map with |readonly| set to |false|.
+    SharedMemory shared_memory2(shm2, false);
+    shared_memory2.Map(s_memory_size);
+  }
+
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_posix.cc b/base/memory/shared_memory_posix.cc
new file mode 100644
index 0000000..7e94223
--- /dev/null
+++ b/base/memory/shared_memory_posix.cc
@@ -0,0 +1,505 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
+#include "base/process/process_metrics.h"
+#include "base/profiler/scoped_tracker.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#include "third_party/ashmem/ashmem.h"
+#elif defined(__ANDROID__)
+#include <cutils/ashmem.h>
+#endif
+
+namespace base {
+
+namespace {
+
+struct ScopedPathUnlinkerTraits {
+  static FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(FilePath* path) {
+    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+    // is fixed.
+    tracked_objects::ScopedTracker tracking_profile(
+        FROM_HERE_WITH_EXPLICIT_FUNCTION(
+            "466437 SharedMemory::Create::Unlink"));
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+typedef ScopedGeneric<FilePath*, ScopedPathUnlinkerTraits> ScopedPathUnlinker;
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
+// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+                                 ScopedFILE* fp,
+                                 ScopedFD* readonly_fd,
+                                 FilePath* path) {
+  // It doesn't make sense to have an open-existing private piece of shmem.
+  DCHECK(!options.open_existing_deprecated);
+  // Q: Why not use the shm_open() etc. APIs?
+  // A: Because they're limited to 4mb on OS X.  FFFFFFFUUUUUUUUUUU
+  FilePath directory;
+  ScopedPathUnlinker path_unlinker;
+  if (GetShmemTempDir(options.executable, &directory)) {
+    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+    // is fixed.
+    tracked_objects::ScopedTracker tracking_profile(
+        FROM_HERE_WITH_EXPLICIT_FUNCTION(
+            "466437 SharedMemory::Create::OpenTemporaryFile"));
+    fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
+
+    // Deleting the file prevents anyone else from mapping it in (making it
+    // private), and prevents the need for cleanup (once the last fd is
+    // closed, it is truly freed).
+    if (*fp)
+      path_unlinker.reset(path);
+  }
+
+  if (*fp) {
+    if (options.share_read_only) {
+      // TODO(erikchen): Remove ScopedTracker below once
+      // http://crbug.com/466437 is fixed.
+      tracked_objects::ScopedTracker tracking_profile(
+          FROM_HERE_WITH_EXPLICIT_FUNCTION(
+              "466437 SharedMemory::Create::OpenReadonly"));
+      // Also open as readonly so that we can ShareReadOnlyToProcess.
+      readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+      if (!readonly_fd->is_valid()) {
+        DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+        fp->reset();
+        return false;
+      }
+    }
+  }
+  return true;
+}
+#endif  // !defined(OS_ANDROID) &&  !defined(__ANDROID__)
+}
+
+SharedMemoryCreateOptions::SharedMemoryCreateOptions()
+    : name_deprecated(nullptr),
+      open_existing_deprecated(false),
+      size(0),
+      executable(false),
+      share_read_only(false) {}
+
+SharedMemory::SharedMemory()
+    : mapped_file_(-1),
+      readonly_mapped_file_(-1),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(false),
+      requested_size_(0) {
+}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : mapped_file_(handle.fd),
+      readonly_mapped_file_(-1),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(read_only),
+      requested_size_(0) {
+}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.fd >= 0;
+}
+
+// static
+SharedMemoryHandle SharedMemory::NULLHandle() {
+  return SharedMemoryHandle();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  DCHECK_GE(handle.fd, 0);
+  if (IGNORE_EINTR(close(handle.fd)) < 0)
+    DPLOG(ERROR) << "close";
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  return base::GetMaxFds();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  int duped_handle = HANDLE_EINTR(dup(handle.fd));
+  if (duped_handle < 0)
+    return base::SharedMemory::NULLHandle();
+  return base::FileDescriptor(duped_handle, true);
+}
+
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.fd;
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+// static
+bool SharedMemory::GetSizeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t* size) {
+  struct stat st;
+  if (fstat(handle.fd, &st) != 0)
+    return false;
+  if (st.st_size < 0)
+    return false;
+  *size = st.st_size;
+  return true;
+}
+
+// Chromium mostly only uses the unique/private shmem as specified by an
+// empty name (name == L""). The exception is in the StatsTable.
+// TODO(jrg): there is no way to "clean up" all unused named shmem if
+// we restart from a crash.  (That isn't a new problem, but it is a problem.)
+// In case we want to delete it later, it may be useful to save the value
+// of mem_filename after FilePathForMemoryName().
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+  // is fixed.
+  tracked_objects::ScopedTracker tracking_profile1(
+      FROM_HERE_WITH_EXPLICIT_FUNCTION(
+          "466437 SharedMemory::Create::Start"));
+  DCHECK_EQ(-1, mapped_file_);
+  if (options.size == 0) return false;
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  ScopedFILE fp;
+  bool fix_size = true;
+  ScopedFD readonly_fd;
+
+  FilePath path;
+  if (options.name_deprecated == NULL || options.name_deprecated->empty()) {
+    bool result =
+        CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+    if (!result)
+      return false;
+  } else {
+    if (!FilePathForMemoryName(*options.name_deprecated, &path))
+      return false;
+
+    // Make sure that the file is opened without any permission
+    // to other users on the system.
+    const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
+
+    // First, try to create the file.
+    int fd = HANDLE_EINTR(
+        open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly));
+    if (fd == -1 && options.open_existing_deprecated) {
+      // If this doesn't work, try to open an existing file in append mode.
+      // Opening an existing file in a world writable directory has two main
+      // security implications:
+      // - Attackers could plant a file under their control, so ownership of
+      //   the file is checked below.
+      // - Attackers could plant a symbolic link so that an unexpected file
+      //   is opened, so O_NOFOLLOW is passed to open().
+      fd = HANDLE_EINTR(
+          open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW));
+
+      // Check that the current user owns the file.
+      // If uid != euid, then a more complex permission model is used and this
+      // API is not appropriate.
+      const uid_t real_uid = getuid();
+      const uid_t effective_uid = geteuid();
+      struct stat sb;
+      if (fd >= 0 &&
+          (fstat(fd, &sb) != 0 || sb.st_uid != real_uid ||
+           sb.st_uid != effective_uid)) {
+        LOG(ERROR) <<
+            "Invalid owner when opening existing shared memory file.";
+        close(fd);
+        return false;
+      }
+
+      // An existing file was opened, so its size should not be fixed.
+      fix_size = false;
+    }
+
+    if (options.share_read_only) {
+      // Also open as readonly so that we can ShareReadOnlyToProcess.
+      readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+      if (!readonly_fd.is_valid()) {
+        DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+        close(fd);
+        fd = -1;
+        return false;
+      }
+    }
+    if (fd >= 0) {
+      // "a+" is always appropriate: if it's a new file, a+ is similar to w+.
+      fp.reset(fdopen(fd, "a+"));
+    }
+  }
+  if (fp && fix_size) {
+    // Get current size.
+    struct stat stat;
+    if (fstat(fileno(fp.get()), &stat) != 0)
+      return false;
+    const size_t current_size = stat.st_size;
+    if (current_size != options.size) {
+      if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+        return false;
+    }
+    requested_size_ = options.size;
+  }
+  if (fp == nullptr) {
+    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+    FilePath dir = path.DirName();
+    if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
+      PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
+      if (dir.value() == "/dev/shm") {
+        LOG(FATAL) << "This is frequently caused by incorrect permissions on "
+                   << "/dev/shm.  Try 'sudo chmod 1777 /dev/shm' to fix.";
+      }
+    }
+    return false;
+  }
+
+  return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+}
+
+// Our current implementation of shmem is with mmap()ing of files.
+// These files need to be deleted explicitly.
+// In practice this call is only needed for unit tests.
+bool SharedMemory::Delete(const std::string& name) {
+  FilePath path;
+  if (!FilePathForMemoryName(name, &path))
+    return false;
+
+  if (PathExists(path))
+    return base::DeleteFile(path, false);
+
+  // Doesn't exist, so success.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  FilePath path;
+  if (!FilePathForMemoryName(name, &path))
+    return false;
+
+  read_only_ = read_only;
+
+  const char *mode = read_only ? "r" : "r+";
+  ScopedFILE fp(base::OpenFile(path, mode));
+  ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+  if (!readonly_fd.is_valid()) {
+    DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+    return false;
+  }
+  return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+}
+#endif  // !defined(OS_ANDROID) && !defined(__ANDROID__)
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (mapped_file_ == -1)
+    return false;
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (memory_)
+    return false;
+
+#if defined(OS_ANDROID) || defined(__ANDROID__)
+  // On Android, Map can be called with a size and offset of zero to use the
+  // ashmem-determined size.
+  if (bytes == 0) {
+    DCHECK_EQ(0, offset);
+    int ashmem_bytes = ashmem_get_size_region(mapped_file_);
+    if (ashmem_bytes < 0)
+      return false;
+    bytes = ashmem_bytes;
+  }
+#endif
+
+  memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+                 MAP_SHARED, mapped_file_, offset);
+
+  bool mmap_succeeded = memory_ != (void*)-1 && memory_ != NULL;
+  if (mmap_succeeded) {
+    mapped_size_ = bytes;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+        (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  } else {
+    memory_ = NULL;
+  }
+
+  return mmap_succeeded;
+}
+
+bool SharedMemory::Unmap() {
+  if (memory_ == NULL)
+    return false;
+
+  munmap(memory_, mapped_size_);
+  memory_ = NULL;
+  mapped_size_ = 0;
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return FileDescriptor(mapped_file_, false);
+}
+
+void SharedMemory::Close() {
+  if (mapped_file_ > 0) {
+    if (IGNORE_EINTR(close(mapped_file_)) < 0)
+      PLOG(ERROR) << "close";
+    mapped_file_ = -1;
+  }
+  if (readonly_mapped_file_ > 0) {
+    if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
+      PLOG(ERROR) << "close";
+    readonly_mapped_file_ = -1;
+  }
+}
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
+  DCHECK_EQ(-1, mapped_file_);
+  DCHECK_EQ(-1, readonly_mapped_file_);
+  if (fp == nullptr)
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  struct stat st = {};
+  if (fstat(fileno(fp.get()), &st))
+    NOTREACHED();
+  if (readonly_fd.is_valid()) {
+    struct stat readonly_st = {};
+    if (fstat(readonly_fd.get(), &readonly_st))
+      NOTREACHED();
+    if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+      LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+      return false;
+    }
+  }
+
+  mapped_file_ = HANDLE_EINTR(dup(fileno(fp.get())));
+  if (mapped_file_ == -1) {
+    if (errno == EMFILE) {
+      LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
+      return false;
+    } else {
+      NOTREACHED() << "Call to dup failed, errno=" << errno;
+    }
+  }
+  readonly_mapped_file_ = readonly_fd.release();
+
+  return true;
+}
+
+// For the given shmem named |mem_name|, return a filename to mmap()
+// (and possibly create).  Modifies |filename|.  Return false on
+// error, or true of we are happy.
+bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
+                                         FilePath* path) {
+  // mem_name will be used for a filename; make sure it doesn't
+  // contain anything which will confuse us.
+  DCHECK_EQ(std::string::npos, mem_name.find('/'));
+  DCHECK_EQ(std::string::npos, mem_name.find('\0'));
+
+  FilePath temp_dir;
+  if (!GetShmemTempDir(false, &temp_dir))
+    return false;
+
+#if defined(GOOGLE_CHROME_BUILD)
+  std::string name_base = std::string("com.google.Chrome");
+#else
+  std::string name_base = std::string("org.chromium.Chromium");
+#endif
+  *path = temp_dir.AppendASCII(name_base + ".shmem." + mem_name);
+  return true;
+}
+#endif  // !defined(OS_ANDROID) && !defined(__ANDROID__)
+
+bool SharedMemory::ShareToProcessCommon(ProcessHandle,
+                                        SharedMemoryHandle* new_handle,
+                                        bool close_self,
+                                        ShareMode share_mode) {
+  int handle_to_dup = -1;
+  switch(share_mode) {
+    case SHARE_CURRENT_MODE:
+      handle_to_dup = mapped_file_;
+      break;
+    case SHARE_READONLY:
+      // We could imagine re-opening the file from /dev/fd, but that can't make
+      // it readonly on Mac: https://codereview.chromium.org/27265002/#msg10
+      CHECK_GE(readonly_mapped_file_, 0);
+      handle_to_dup = readonly_mapped_file_;
+      break;
+  }
+
+  const int new_fd = HANDLE_EINTR(dup(handle_to_dup));
+  if (new_fd < 0) {
+    if (close_self) {
+      Unmap();
+      Close();
+    }
+    DPLOG(ERROR) << "dup() failed.";
+    return false;
+  }
+
+  new_handle->fd = new_fd;
+  new_handle->auto_close = true;
+
+  if (close_self) {
+    Unmap();
+    Close();
+  }
+
+  return true;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
new file mode 100644
index 0000000..f29865c
--- /dev/null
+++ b/base/memory/shared_memory_unittest.cc
@@ -0,0 +1,697 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/process/kill.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+namespace base {
+
+namespace {
+
+#if !defined(OS_MACOSX)
+// Each thread will open the shared memory.  Each thread will take a different 4
+// byte int pointer, and keep changing it, with some small pauses in between.
+// Verify that each thread's value in the shared memory is always correct.
+class MultipleThreadMain : public PlatformThread::Delegate {
+ public:
+  explicit MultipleThreadMain(int16_t id) : id_(id) {}
+  ~MultipleThreadMain() override {}
+
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  // PlatformThread::Delegate interface.
+  void ThreadMain() override {
+    const uint32_t kDataSize = 1024;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
+    EXPECT_TRUE(rv);
+    rv = memory.Map(kDataSize);
+    EXPECT_TRUE(rv);
+    int* ptr = static_cast<int*>(memory.memory()) + id_;
+    EXPECT_EQ(0, *ptr);
+
+    for (int idx = 0; idx < 100; idx++) {
+      *ptr = idx;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+      EXPECT_EQ(*ptr, idx);
+    }
+    // Reset back to 0 for the next test that uses the same name.
+    *ptr = 0;
+
+    memory.Close();
+  }
+
+ private:
+  int16_t id_;
+
+  static const char s_test_name_[];
+
+  DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
+};
+
+const char MultipleThreadMain::s_test_name_[] =
+    "SharedMemoryOpenThreadTest";
+#endif  // !defined(OS_MACOSX)
+
+}  // namespace
+
+// Android/Mac doesn't support SharedMemory::Open/Delete/
+// CreateNamedDeprecated(openExisting=true)
+#if !defined(OS_ANDROID) && !defined(OS_MACOSX)
+TEST(SharedMemoryTest, OpenClose) {
+  const uint32_t kDataSize = 1024;
+  std::string test_name = "SharedMemoryOpenCloseTest";
+
+  // Open two handles to a memory segment, confirm that they are mapped
+  // separately yet point to the same space.
+  SharedMemory memory1;
+  bool rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory1.Open(test_name, false);
+  EXPECT_FALSE(rv);
+  rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+  EXPECT_TRUE(rv);
+  rv = memory1.Map(kDataSize);
+  EXPECT_TRUE(rv);
+  SharedMemory memory2;
+  rv = memory2.Open(test_name, false);
+  EXPECT_TRUE(rv);
+  rv = memory2.Map(kDataSize);
+  EXPECT_TRUE(rv);
+  EXPECT_NE(memory1.memory(), memory2.memory());  // Compare the pointers.
+
+  // Make sure we don't segfault. (it actually happened!)
+  ASSERT_NE(memory1.memory(), static_cast<void*>(NULL));
+  ASSERT_NE(memory2.memory(), static_cast<void*>(NULL));
+
+  // Write data to the first memory segment, verify contents of second.
+  memset(memory1.memory(), '1', kDataSize);
+  EXPECT_EQ(memcmp(memory1.memory(), memory2.memory(), kDataSize), 0);
+
+  // Close the first memory segment, and verify the second has the right data.
+  memory1.Close();
+  char* start_ptr = static_cast<char*>(memory2.memory());
+  char* end_ptr = start_ptr + kDataSize;
+  for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
+    EXPECT_EQ(*ptr, '1');
+
+  // Close the second memory segment.
+  memory2.Close();
+
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory2.Delete(test_name);
+  EXPECT_TRUE(rv);
+}
+
+TEST(SharedMemoryTest, OpenExclusive) {
+  const uint32_t kDataSize = 1024;
+  const uint32_t kDataSize2 = 2048;
+  std::ostringstream test_name_stream;
+  test_name_stream << "SharedMemoryOpenExclusiveTest."
+                   << Time::Now().ToDoubleT();
+  std::string test_name = test_name_stream.str();
+
+  // Open two handles to a memory segment and check that
+  // open_existing_deprecated works as expected.
+  SharedMemory memory1;
+  bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+  EXPECT_TRUE(rv);
+
+  // Memory1 knows its size because it created it.
+  EXPECT_EQ(memory1.requested_size(), kDataSize);
+
+  rv = memory1.Map(kDataSize);
+  EXPECT_TRUE(rv);
+
+  // The mapped memory1 must be at least the size we asked for.
+  EXPECT_GE(memory1.mapped_size(), kDataSize);
+
+  // The mapped memory1 shouldn't exceed rounding for allocation granularity.
+  EXPECT_LT(memory1.mapped_size(),
+            kDataSize + SysInfo::VMAllocationGranularity());
+
+  memset(memory1.memory(), 'G', kDataSize);
+
+  SharedMemory memory2;
+  // Should not be able to create if openExisting is false.
+  rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
+  EXPECT_FALSE(rv);
+
+  // Should be able to create with openExisting true.
+  rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
+  EXPECT_TRUE(rv);
+
+  // Memory2 shouldn't know the size because we didn't create it.
+  EXPECT_EQ(memory2.requested_size(), 0U);
+
+  // We should be able to map the original size.
+  rv = memory2.Map(kDataSize);
+  EXPECT_TRUE(rv);
+
+  // The mapped memory2 must be at least the size of the original.
+  EXPECT_GE(memory2.mapped_size(), kDataSize);
+
+  // The mapped memory2 shouldn't exceed rounding for allocation granularity.
+  EXPECT_LT(memory2.mapped_size(),
+            kDataSize2 + SysInfo::VMAllocationGranularity());
+
+  // Verify that opening memory2 didn't truncate or delete memory 1.
+  char* start_ptr = static_cast<char*>(memory2.memory());
+  char* end_ptr = start_ptr + kDataSize;
+  for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
+    EXPECT_EQ(*ptr, 'G');
+  }
+
+  memory1.Close();
+  memory2.Close();
+
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+}
+#endif  // !defined(OS_ANDROID) && !defined(OS_MACOSX)
+
+// Check that memory is still mapped after it's closed.
+TEST(SharedMemoryTest, CloseNoUnmap) {
+  const size_t kDataSize = 4096;
+
+  SharedMemory memory;
+  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+  char* ptr = static_cast<char*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(NULL));
+  memset(ptr, 'G', kDataSize);
+
+  memory.Close();
+
+  EXPECT_EQ(ptr, memory.memory());
+  EXPECT_EQ(SharedMemory::NULLHandle(), memory.handle());
+
+  for (size_t i = 0; i < kDataSize; i++) {
+    EXPECT_EQ('G', ptr[i]);
+  }
+
+  memory.Unmap();
+  EXPECT_EQ(nullptr, memory.memory());
+}
+
+#if !defined(OS_MACOSX)
+// Create a set of N threads to each open a shared memory segment and write to
+// it. Verify that they are always reading/writing consistent data.
+TEST(SharedMemoryTest, MultipleThreads) {
+  const int kNumThreads = 5;
+
+  MultipleThreadMain::CleanUp();
+  // On POSIX we have a problem when 2 threads try to create the shmem
+  // (a file) at exactly the same time, since create both creates the
+  // file and zerofills it.  We solve the problem for this unit test
+  // (make it not flaky) by starting with 1 thread, then
+  // intentionally don't clean up its shmem before running with
+  // kNumThreads.
+
+  int threadcounts[] = { 1, kNumThreads };
+  for (size_t i = 0; i < arraysize(threadcounts); i++) {
+    int numthreads = threadcounts[i];
+    std::unique_ptr<PlatformThreadHandle[]> thread_handles;
+    std::unique_ptr<MultipleThreadMain* []> thread_delegates;
+
+    thread_handles.reset(new PlatformThreadHandle[numthreads]);
+    thread_delegates.reset(new MultipleThreadMain*[numthreads]);
+
+    // Spawn the threads.
+    for (int16_t index = 0; index < numthreads; index++) {
+      PlatformThreadHandle pth;
+      thread_delegates[index] = new MultipleThreadMain(index);
+      EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
+      thread_handles[index] = pth;
+    }
+
+    // Wait for the threads to finish.
+    for (int index = 0; index < numthreads; index++) {
+      PlatformThread::Join(thread_handles[index]);
+      delete thread_delegates[index];
+    }
+  }
+  MultipleThreadMain::CleanUp();
+}
+#endif
+
+// Allocate private (unique) shared memory with an empty string for a
+// name.  Make sure several of them don't point to the same thing as
+// we might expect if the names are equal.
+TEST(SharedMemoryTest, AnonymousPrivate) {
+  int i, j;
+  int count = 4;
+  bool rv;
+  const uint32_t kDataSize = 8192;
+
+  std::unique_ptr<SharedMemory[]> memories(new SharedMemory[count]);
+  std::unique_ptr<int* []> pointers(new int*[count]);
+  ASSERT_TRUE(memories.get());
+  ASSERT_TRUE(pointers.get());
+
+  for (i = 0; i < count; i++) {
+    rv = memories[i].CreateAndMapAnonymous(kDataSize);
+    EXPECT_TRUE(rv);
+    int* ptr = static_cast<int*>(memories[i].memory());
+    EXPECT_TRUE(ptr);
+    pointers[i] = ptr;
+  }
+
+  for (i = 0; i < count; i++) {
+    // zero out the first int in each except for i; for that one, make it 100.
+    for (j = 0; j < count; j++) {
+      if (i == j)
+        pointers[j][0] = 100;
+      else
+        pointers[j][0] = 0;
+    }
+    // make sure there is no bleeding of the 100 into the other pointers
+    for (j = 0; j < count; j++) {
+      if (i == j)
+        EXPECT_EQ(100, pointers[j][0]);
+      else
+        EXPECT_EQ(0, pointers[j][0]);
+    }
+  }
+
+  for (int i = 0; i < count; i++) {
+    memories[i].Close();
+  }
+}
+
+// The Mach functionality is tested in shared_memory_mac_unittest.cc.
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+TEST(SharedMemoryTest, ShareReadOnly) {
+  StringPiece contents = "Hello World";
+
+  SharedMemory writable_shmem;
+  SharedMemoryCreateOptions options;
+  options.size = contents.size();
+  options.share_read_only = true;
+  ASSERT_TRUE(writable_shmem.Create(options));
+  ASSERT_TRUE(writable_shmem.Map(options.size));
+  memcpy(writable_shmem.memory(), contents.data(), contents.size());
+  EXPECT_TRUE(writable_shmem.Unmap());
+
+  SharedMemoryHandle readonly_handle;
+  ASSERT_TRUE(writable_shmem.ShareReadOnlyToProcess(GetCurrentProcessHandle(),
+                                                    &readonly_handle));
+  SharedMemory readonly_shmem(readonly_handle, /*readonly=*/true);
+
+  ASSERT_TRUE(readonly_shmem.Map(contents.size()));
+  EXPECT_EQ(contents,
+            StringPiece(static_cast<const char*>(readonly_shmem.memory()),
+                        contents.size()));
+  EXPECT_TRUE(readonly_shmem.Unmap());
+
+  // Make sure the writable instance is still writable.
+  ASSERT_TRUE(writable_shmem.Map(contents.size()));
+  StringPiece new_contents = "Goodbye";
+  memcpy(writable_shmem.memory(), new_contents.data(), new_contents.size());
+  EXPECT_EQ(new_contents,
+            StringPiece(static_cast<const char*>(writable_shmem.memory()),
+                        new_contents.size()));
+
+  // We'd like to check that if we send the read-only segment to another
+  // process, then that other process can't reopen it read/write.  (Since that
+  // would be a security hole.)  Setting up multiple processes is hard in a
+  // unittest, so this test checks that the *current* process can't reopen the
+  // segment read/write.  I think the test here is stronger than we actually
+  // care about, but there's a remote possibility that sending a file over a
+  // pipe would transform it into read/write.
+  SharedMemoryHandle handle = readonly_shmem.handle();
+
+#if defined(OS_ANDROID)
+  // The "read-only" handle is still writable on Android:
+  // http://crbug.com/320865
+  (void)handle;
+#elif defined(OS_POSIX)
+  int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
+  EXPECT_EQ(O_RDONLY, fcntl(handle_fd, F_GETFL) & O_ACCMODE)
+      << "The descriptor itself should be read-only.";
+
+  errno = 0;
+  void* writable = mmap(NULL, contents.size(), PROT_READ | PROT_WRITE,
+                        MAP_SHARED, handle_fd, 0);
+  int mmap_errno = errno;
+  EXPECT_EQ(MAP_FAILED, writable)
+      << "It shouldn't be possible to re-mmap the descriptor writable.";
+  EXPECT_EQ(EACCES, mmap_errno) << strerror(mmap_errno);
+  if (writable != MAP_FAILED)
+    EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));
+
+#elif defined(OS_WIN)
+  EXPECT_EQ(NULL, MapViewOfFile(handle.GetHandle(), FILE_MAP_WRITE, 0, 0, 0))
+      << "Shouldn't be able to map memory writable.";
+
+  HANDLE temp_handle;
+  BOOL rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+                              GetCurrentProcess(), &temp_handle,
+                              FILE_MAP_ALL_ACCESS, false, 0);
+  EXPECT_EQ(FALSE, rv)
+      << "Shouldn't be able to duplicate the handle into a writable one.";
+  if (rv)
+    win::ScopedHandle writable_handle(temp_handle);
+  rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+                         GetCurrentProcess(), &temp_handle, FILE_MAP_READ,
+                         false, 0);
+  EXPECT_EQ(TRUE, rv)
+      << "Should be able to duplicate the handle into a readable one.";
+  if (rv)
+    win::ScopedHandle writable_handle(temp_handle);
+#else
+#error Unexpected platform; write a test that tries to make 'handle' writable.
+#endif  // defined(OS_POSIX) || defined(OS_WIN)
+}
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
+
+TEST(SharedMemoryTest, ShareToSelf) {
+  StringPiece contents = "Hello World";
+
+  SharedMemory shmem;
+  ASSERT_TRUE(shmem.CreateAndMapAnonymous(contents.size()));
+  memcpy(shmem.memory(), contents.data(), contents.size());
+  EXPECT_TRUE(shmem.Unmap());
+
+  SharedMemoryHandle shared_handle;
+  ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
+#if defined(OS_WIN)
+  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+#endif
+  SharedMemory shared(shared_handle, /*readonly=*/false);
+
+  ASSERT_TRUE(shared.Map(contents.size()));
+  EXPECT_EQ(
+      contents,
+      StringPiece(static_cast<const char*>(shared.memory()), contents.size()));
+
+  shared_handle = SharedMemoryHandle();
+  ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
+#if defined(OS_WIN)
+  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+#endif
+  SharedMemory readonly(shared_handle, /*readonly=*/true);
+
+  ASSERT_TRUE(readonly.Map(contents.size()));
+  EXPECT_EQ(contents,
+            StringPiece(static_cast<const char*>(readonly.memory()),
+                        contents.size()));
+}
+
+TEST(SharedMemoryTest, MapAt) {
+  ASSERT_TRUE(SysInfo::VMAllocationGranularity() >= sizeof(uint32_t));
+  const size_t kCount = SysInfo::VMAllocationGranularity();
+  const size_t kDataSize = kCount * sizeof(uint32_t);
+
+  SharedMemory memory;
+  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+  uint32_t* ptr = static_cast<uint32_t*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(NULL));
+
+  for (size_t i = 0; i < kCount; ++i) {
+    ptr[i] = i;
+  }
+
+  memory.Unmap();
+
+  off_t offset = SysInfo::VMAllocationGranularity();
+  ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
+  offset /= sizeof(uint32_t);
+  ptr = static_cast<uint32_t*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(NULL));
+  for (size_t i = offset; i < kCount; ++i) {
+    EXPECT_EQ(ptr[i - offset], i);
+  }
+}
+
+TEST(SharedMemoryTest, MapTwice) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  bool rv = memory.CreateAndMapAnonymous(kDataSize);
+  EXPECT_TRUE(rv);
+
+  void* old_address = memory.memory();
+
+  rv = memory.Map(kDataSize);
+  EXPECT_FALSE(rv);
+  EXPECT_EQ(old_address, memory.memory());
+}
+
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+// This test is not applicable for iOS (crbug.com/399384).
+#if !defined(OS_IOS)
+// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
+TEST(SharedMemoryTest, AnonymousExecutable) {
+  const uint32_t kTestSize = 1 << 16;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+  options.executable = true;
+
+  EXPECT_TRUE(shared_memory.Create(options));
+  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
+
+  EXPECT_EQ(0, mprotect(shared_memory.memory(), shared_memory.requested_size(),
+                        PROT_READ | PROT_EXEC));
+}
+#endif  // !defined(OS_IOS)
+
+// Android supports a different permission model than POSIX for its "ashmem"
+// shared memory implementation. So the tests about file permissions are not
+// included on Android.
+#if !defined(OS_ANDROID)
+
+// Set a umask and restore the old mask on destruction.
+class ScopedUmaskSetter {
+ public:
+  explicit ScopedUmaskSetter(mode_t target_mask) {
+    old_umask_ = umask(target_mask);
+  }
+  ~ScopedUmaskSetter() { umask(old_umask_); }
+ private:
+  mode_t old_umask_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedUmaskSetter);
+};
+
+// Create a shared memory object, check its permissions.
+TEST(SharedMemoryTest, FilePermissionsAnonymous) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+  // Set a file mode creation mask that gives all permissions.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int shm_fd =
+      SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
+  // Neither the group, nor others should be able to read the shared memory
+  // file.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+
+// Create a shared memory object, check its permissions.
+TEST(SharedMemoryTest, FilePermissionsNamed) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+
+  // Set a file mode creation mask that gives all permissions.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int fd = SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(fd, &shm_stat));
+  // Neither the group, nor others should have been able to open the shared
+  // memory file while its name existed.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+#endif  // !defined(OS_ANDROID)
+
+#endif  // defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+
+// Map() will return addresses which are aligned to the platform page size, this
+// varies from platform to platform though.  Since we'd like to advertise a
+// minimum alignment that callers can count on, test for it here.
+TEST(SharedMemoryTest, MapMinimumAlignment) {
+  static const int kDataSize = 8192;
+
+  SharedMemory shared_memory;
+  ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(kDataSize));
+  EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(
+      shared_memory.memory()) & (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  shared_memory.Close();
+}
+
+#if defined(OS_WIN)
+TEST(SharedMemoryTest, UnsafeImageSection) {
+  const char kTestSectionName[] = "UnsafeImageSection";
+  wchar_t path[MAX_PATH];
+  EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);
+
+  // Map the current executable image to save us creating a new PE file on disk.
+  base::win::ScopedHandle file_handle(::CreateFile(
+      path, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0, nullptr));
+  EXPECT_TRUE(file_handle.IsValid());
+  base::win::ScopedHandle section_handle(
+      ::CreateFileMappingA(file_handle.Get(), nullptr,
+                           PAGE_READONLY | SEC_IMAGE, 0, 0, kTestSectionName));
+  EXPECT_TRUE(section_handle.IsValid());
+
+  // Check direct opening by name, from handle and duplicated from handle.
+  SharedMemory shared_memory_open;
+  EXPECT_TRUE(shared_memory_open.Open(kTestSectionName, true));
+  EXPECT_FALSE(shared_memory_open.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_open.memory());
+
+  SharedMemory shared_memory_handle_local(
+      SharedMemoryHandle(section_handle.Take(), ::GetCurrentProcessId()), true);
+  EXPECT_FALSE(shared_memory_handle_local.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_local.memory());
+
+  // Check that a handle without SECTION_QUERY also can't be mapped as it can't
+  // be checked.
+  SharedMemory shared_memory_handle_dummy;
+  SharedMemoryCreateOptions options;
+  options.size = 0x1000;
+  EXPECT_TRUE(shared_memory_handle_dummy.Create(options));
+  HANDLE handle_no_query;
+  EXPECT_TRUE(::DuplicateHandle(
+      ::GetCurrentProcess(), shared_memory_handle_dummy.handle().GetHandle(),
+      ::GetCurrentProcess(), &handle_no_query, FILE_MAP_READ, FALSE, 0));
+  SharedMemory shared_memory_handle_no_query(
+      SharedMemoryHandle(handle_no_query, ::GetCurrentProcessId()), true);
+  EXPECT_FALSE(shared_memory_handle_no_query.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_no_query.memory());
+}
+#endif  // defined(OS_WIN)
+
+// iOS does not allow multiple processes.
+// Android ashmem does not support named shared memory.
+// Mac SharedMemory does not support named shared memory. crbug.com/345734
+#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
+// On POSIX it is especially important we test shmem across processes,
+// not just across threads.  But the test is enabled on all platforms.
+class SharedMemoryProcessTest : public MultiProcessTest {
+ public:
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  static int TaskTestMain() {
+    int errors = 0;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+    EXPECT_TRUE(rv);
+    if (rv != true)
+      errors++;
+    rv = memory.Map(s_data_size_);
+    EXPECT_TRUE(rv);
+    if (rv != true)
+      errors++;
+    int* ptr = static_cast<int*>(memory.memory());
+
+    // This runs concurrently in multiple processes. Writes need to be atomic.
+    subtle::Barrier_AtomicIncrement(ptr, 1);
+    memory.Close();
+    return errors;
+  }
+
+  static const char s_test_name_[];
+  static const uint32_t s_data_size_;
+};
+
+const char SharedMemoryProcessTest::s_test_name_[] = "MPMem";
+const uint32_t SharedMemoryProcessTest::s_data_size_ = 1024;
+
+TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
+  const int kNumTasks = 5;
+
+  SharedMemoryProcessTest::CleanUp();
+
+  // Create a shared memory region. Set the first word to 0.
+  SharedMemory memory;
+  bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+  ASSERT_TRUE(rv);
+  rv = memory.Map(s_data_size_);
+  ASSERT_TRUE(rv);
+  int* ptr = static_cast<int*>(memory.memory());
+  *ptr = 0;
+
+  // Start |kNumTasks| processes, each of which atomically increments the first
+  // word by 1.
+  Process processes[kNumTasks];
+  for (int index = 0; index < kNumTasks; ++index) {
+    processes[index] = SpawnChild("SharedMemoryTestMain");
+    ASSERT_TRUE(processes[index].IsValid());
+  }
+
+  // Check that each process exited correctly.
+  int exit_code = 0;
+  for (int index = 0; index < kNumTasks; ++index) {
+    EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
+    EXPECT_EQ(0, exit_code);
+  }
+
+  // Check that the shared memory region reflects |kNumTasks| increments.
+  ASSERT_EQ(kNumTasks, *ptr);
+
+  memory.Close();
+  SharedMemoryProcessTest::CleanUp();
+}
+
+MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
+  return SharedMemoryProcessTest::TaskTestMain();
+}
+#endif  // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
+
+}  // namespace base
diff --git a/base/memory/weak_ptr.cc b/base/memory/weak_ptr.cc
index d9ce86a..4e77b04 100644
--- a/base/memory/weak_ptr.cc
+++ b/base/memory/weak_ptr.cc
@@ -40,6 +40,10 @@
 WeakReference::~WeakReference() {
 }
 
+WeakReference::WeakReference(WeakReference&& other) = default;
+
+WeakReference::WeakReference(const WeakReference& other) = default;
+
 bool WeakReference::is_valid() const { return flag_.get() && flag_->IsValid(); }
 
 WeakReferenceOwner::WeakReferenceOwner() {
diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h
index 33d1e47..3544439 100644
--- a/base/memory/weak_ptr.h
+++ b/base/memory/weak_ptr.h
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 // Weak pointers are pointers to an object that do not affect its lifetime,
-// and which may be invalidated (i.e. reset to NULL) by the object, or its
+// and which may be invalidated (i.e. reset to nullptr) by the object, or its
 // owner, at any time, most commonly when the object is about to be deleted.
 
 // Weak pointers are useful when an object needs to be accessed safely by one
@@ -70,12 +70,14 @@
 #ifndef BASE_MEMORY_WEAK_PTR_H_
 #define BASE_MEMORY_WEAK_PTR_H_
 
+#include <cstddef>
+#include <type_traits>
+
 #include "base/base_export.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/sequence_checker.h"
-#include "base/template_util.h"
 
 namespace base {
 
@@ -110,6 +112,11 @@
   explicit WeakReference(const Flag* flag);
   ~WeakReference();
 
+  WeakReference(WeakReference&& other);
+  WeakReference(const WeakReference& other);
+  WeakReference& operator=(WeakReference&& other) = default;
+  WeakReference& operator=(const WeakReference& other) = default;
+
   bool is_valid() const;
 
  private:
@@ -142,6 +149,11 @@
   WeakPtrBase();
   ~WeakPtrBase();
 
+  WeakPtrBase(const WeakPtrBase& other) = default;
+  WeakPtrBase(WeakPtrBase&& other) = default;
+  WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+  WeakPtrBase& operator=(WeakPtrBase&& other) = default;
+
  protected:
   explicit WeakPtrBase(const WeakReference& ref);
 
@@ -159,10 +171,9 @@
   // function that makes calling this easier.
   template<typename Derived>
   static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
-    typedef
-        is_convertible<Derived, internal::SupportsWeakPtrBase&> convertible;
-    static_assert(convertible::value,
-                  "AsWeakPtr argument must inherit from SupportsWeakPtr");
+    static_assert(
+        std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
+        "AsWeakPtr argument must inherit from SupportsWeakPtr");
     return AsWeakPtrImpl<Derived>(t, *t);
   }
 
@@ -198,50 +209,39 @@
 template <typename T>
 class WeakPtr : public internal::WeakPtrBase {
  public:
-  WeakPtr() : ptr_(NULL) {
-  }
+  WeakPtr() : ptr_(nullptr) {}
+
+  WeakPtr(std::nullptr_t) : ptr_(nullptr) {}
 
   // Allow conversion from U to T provided U "is a" T. Note that this
-  // is separate from the (implicit) copy constructor.
+  // is separate from the (implicit) copy and move constructors.
   template <typename U>
   WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other), ptr_(other.ptr_) {
   }
+  template <typename U>
+  WeakPtr(WeakPtr<U>&& other)
+      : WeakPtrBase(std::move(other)), ptr_(other.ptr_) {}
 
-  T* get() const { return ref_.is_valid() ? ptr_ : NULL; }
+  T* get() const { return ref_.is_valid() ? ptr_ : nullptr; }
 
   T& operator*() const {
-    DCHECK(get() != NULL);
+    DCHECK(get() != nullptr);
     return *get();
   }
   T* operator->() const {
-    DCHECK(get() != NULL);
+    DCHECK(get() != nullptr);
     return get();
   }
 
-  // Allow WeakPtr<element_type> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
-  //
-  // Note that this trick is only safe when the == and != operators
-  // are declared explicitly, as otherwise "weak_ptr1 == weak_ptr2"
-  // will compile but do the wrong thing (i.e., convert to Testable
-  // and then do the comparison).
- private:
-  typedef T* WeakPtr::*Testable;
-
- public:
-  operator Testable() const { return get() ? &WeakPtr::ptr_ : NULL; }
-
   void reset() {
     ref_ = internal::WeakReference();
-    ptr_ = NULL;
+    ptr_ = nullptr;
   }
 
- private:
-  // Explicitly declare comparison operators as required by the bool
-  // trick, but keep them private.
-  template <class U> bool operator==(WeakPtr<U> const&) const;
-  template <class U> bool operator!=(WeakPtr<U> const&) const;
+  // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+  explicit operator bool() const { return get() != nullptr; }
 
+ private:
   friend class internal::SupportsWeakPtrBase;
   template <typename U> friend class WeakPtr;
   friend class SupportsWeakPtr<T>;
@@ -253,10 +253,28 @@
   }
 
   // This pointer is only valid when ref_.is_valid() is true.  Otherwise, its
-  // value is undefined (as opposed to NULL).
+  // value is undefined (as opposed to nullptr).
   T* ptr_;
 };
 
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return !(weak_ptr == nullptr);
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr != nullptr;
+}
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr == nullptr;
+}
+
 // A class may be composed of a WeakPtrFactory and thereby
 // control how it exposes weak pointers to itself.  This is helpful if you only
 // need weak pointers within the implementation of a class.  This class is also
@@ -268,9 +286,7 @@
   explicit WeakPtrFactory(T* ptr) : ptr_(ptr) {
   }
 
-  ~WeakPtrFactory() {
-    ptr_ = NULL;
-  }
+  ~WeakPtrFactory() { ptr_ = nullptr; }
 
   WeakPtr<T> GetWeakPtr() {
     DCHECK(ptr_);
diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc
index 2c475f7..ebcf33c 100644
--- a/base/memory/weak_ptr_unittest.cc
+++ b/base/memory/weak_ptr_unittest.cc
@@ -4,11 +4,12 @@
 
 #include "base/memory/weak_ptr.h"
 
+#include <memory>
 #include <string>
 
 #include "base/bind.h"
+#include "base/debug/leak_annotations.h"
 #include "base/location.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread.h"
@@ -17,6 +18,10 @@
 namespace base {
 namespace {
 
+WeakPtr<int> PassThru(WeakPtr<int> ptr) {
+  return ptr;
+}
+
 template <class T>
 class OffThreadObjectCreator {
  public:
@@ -64,7 +69,8 @@
   ~BackgroundThread() override { Stop(); }
 
   void CreateArrowFromTarget(Arrow** arrow, Target* target) {
-    WaitableEvent completion(true, false);
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
     task_runner()->PostTask(
         FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromTarget, arrow,
                               target, &completion));
@@ -72,7 +78,8 @@
   }
 
   void CreateArrowFromArrow(Arrow** arrow, const Arrow* other) {
-    WaitableEvent completion(true, false);
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
     task_runner()->PostTask(
         FROM_HERE, base::Bind(&BackgroundThread::DoCreateArrowFromArrow, arrow,
                               other, &completion));
@@ -80,7 +87,8 @@
   }
 
   void DeleteTarget(Target* object) {
-    WaitableEvent completion(true, false);
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
     task_runner()->PostTask(
         FROM_HERE,
         base::Bind(&BackgroundThread::DoDeleteTarget, object, &completion));
@@ -88,7 +96,8 @@
   }
 
   void CopyAndAssignArrow(Arrow* object) {
-    WaitableEvent completion(true, false);
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
     task_runner()->PostTask(
         FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrow, object,
                               &completion));
@@ -96,7 +105,8 @@
   }
 
   void CopyAndAssignArrowBase(Arrow* object) {
-    WaitableEvent completion(true, false);
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
     task_runner()->PostTask(
         FROM_HERE, base::Bind(&BackgroundThread::DoCopyAndAssignArrowBase,
                               object, &completion));
@@ -104,7 +114,8 @@
   }
 
   void DeleteArrow(Arrow* object) {
-    WaitableEvent completion(true, false);
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
     task_runner()->PostTask(
         FROM_HERE,
         base::Bind(&BackgroundThread::DoDeleteArrow, object, &completion));
@@ -112,8 +123,9 @@
   }
 
   Target* DeRef(const Arrow* arrow) {
-    WaitableEvent completion(true, false);
-    Target* result = NULL;
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    Target* result = nullptr;
     task_runner()->PostTask(FROM_HERE, base::Bind(&BackgroundThread::DoDeRef,
                                                   arrow, &result, &completion));
     completion.Wait();
@@ -191,15 +203,25 @@
   EXPECT_EQ(ptr.get(), ptr2.get());
 }
 
+TEST(WeakPtrFactoryTest, Move) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  WeakPtr<int> ptr2 = factory.GetWeakPtr();
+  WeakPtr<int> ptr3 = std::move(ptr2);
+  EXPECT_NE(ptr.get(), ptr2.get());
+  EXPECT_EQ(ptr.get(), ptr3.get());
+}
+
 TEST(WeakPtrFactoryTest, OutOfScope) {
   WeakPtr<int> ptr;
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
   {
     int data;
     WeakPtrFactory<int> factory(&data);
     ptr = factory.GetWeakPtr();
   }
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
 }
 
 TEST(WeakPtrFactoryTest, Multiple) {
@@ -212,8 +234,8 @@
     EXPECT_EQ(&data, a.get());
     EXPECT_EQ(&data, b.get());
   }
-  EXPECT_EQ(NULL, a.get());
-  EXPECT_EQ(NULL, b.get());
+  EXPECT_EQ(nullptr, a.get());
+  EXPECT_EQ(nullptr, b.get());
 }
 
 TEST(WeakPtrFactoryTest, MultipleStaged) {
@@ -225,9 +247,9 @@
     {
       WeakPtr<int> b = factory.GetWeakPtr();
     }
-    EXPECT_TRUE(NULL != a.get());
+    EXPECT_NE(nullptr, a.get());
   }
-  EXPECT_EQ(NULL, a.get());
+  EXPECT_EQ(nullptr, a.get());
 }
 
 TEST(WeakPtrFactoryTest, Dereference) {
@@ -248,6 +270,11 @@
   EXPECT_EQ(ptr.get(), &data);
 }
 
+TEST(WeakPtrTest, ConstructFromNullptr) {
+  WeakPtr<int> ptr = PassThru(nullptr);
+  EXPECT_EQ(nullptr, ptr.get());
+}
+
 TEST(WeakPtrTest, SupportsWeakPtr) {
   Target target;
   WeakPtr<Target> ptr = target.AsWeakPtr();
@@ -260,6 +287,50 @@
   EXPECT_EQ(&target, ptr.get());
 }
 
+TEST(WeakPtrFactoryTest, BooleanTesting) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  WeakPtr<int> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
+}
+
+TEST(WeakPtrFactoryTest, ComparisonToNull) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+
+  WeakPtr<int> null_ptr;
+  EXPECT_EQ(null_ptr, nullptr);
+  EXPECT_EQ(nullptr, null_ptr);
+}
+
 TEST(WeakPtrTest, InvalidateWeakPtrs) {
   int data;
   WeakPtrFactory<int> factory(&data);
@@ -267,7 +338,7 @@
   EXPECT_EQ(&data, ptr.get());
   EXPECT_TRUE(factory.HasWeakPtrs());
   factory.InvalidateWeakPtrs();
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
   EXPECT_FALSE(factory.HasWeakPtrs());
 
   // Test that the factory can create new weak pointers after a
@@ -277,7 +348,7 @@
   EXPECT_EQ(&data, ptr2.get());
   EXPECT_TRUE(factory.HasWeakPtrs());
   factory.InvalidateWeakPtrs();
-  EXPECT_EQ(NULL, ptr2.get());
+  EXPECT_EQ(nullptr, ptr2.get());
   EXPECT_FALSE(factory.HasWeakPtrs());
 }
 
@@ -295,7 +366,7 @@
   // Test that it is OK to create an object that supports WeakPtr on one thread,
   // but use it on another.  This tests that we do not trip runtime checks that
   // ensure that a WeakPtr is not used by multiple threads.
-  scoped_ptr<Target> target(OffThreadObjectCreator<Target>::NewObject());
+  std::unique_ptr<Target> target(OffThreadObjectCreator<Target>::NewObject());
   WeakPtr<Target> weak_ptr = target->AsWeakPtr();
   EXPECT_EQ(target.get(), weak_ptr.get());
 }
@@ -304,7 +375,7 @@
   // Test that it is OK to create an object that has a WeakPtr member on one
   // thread, but use it on another.  This tests that we do not trip runtime
   // checks that ensure that a WeakPtr is not used by multiple threads.
-  scoped_ptr<Arrow> arrow(OffThreadObjectCreator<Arrow>::NewObject());
+  std::unique_ptr<Arrow> arrow(OffThreadObjectCreator<Arrow>::NewObject());
   Target target;
   arrow->target = target.AsWeakPtr();
   EXPECT_EQ(&target, arrow->target.get());
@@ -377,14 +448,14 @@
   background.Start();
 
   Arrow arrow;
-  scoped_ptr<TargetWithFactory> target(new TargetWithFactory);
+  std::unique_ptr<TargetWithFactory> target(new TargetWithFactory);
 
   // Bind to main thread.
   arrow.target = target->factory.GetWeakPtr();
   EXPECT_EQ(target.get(), arrow.target.get());
 
   target->factory.InvalidateWeakPtrs();
-  EXPECT_EQ(NULL, arrow.target.get());
+  EXPECT_EQ(nullptr, arrow.target.get());
 
   arrow.target = target->factory.GetWeakPtr();
   // Re-bind to background thread.
@@ -447,7 +518,7 @@
     arrow.target = target.AsWeakPtr();
     background.CreateArrowFromArrow(&arrow_copy, &arrow);
   }
-  EXPECT_EQ(NULL, arrow_copy->target.get());
+  EXPECT_EQ(nullptr, arrow_copy->target.get());
   background.DeleteArrow(arrow_copy);
 }
 
@@ -547,7 +618,7 @@
   // (introduces deadlock on Linux).
   ::testing::FLAGS_gtest_death_test_style = "threadsafe";
 
-  scoped_ptr<Target> target(new Target());
+  std::unique_ptr<Target> target(new Target());
 
   // Main thread creates an arrow referencing the Target.
   Arrow arrow;
@@ -571,7 +642,7 @@
   // (introduces deadlock on Linux).
   ::testing::FLAGS_gtest_death_test_style = "threadsafe";
 
-  scoped_ptr<Target> target(new Target());
+  std::unique_ptr<Target> target(new Target());
 
   // Main thread creates an arrow referencing the Target, and references it, so
   // that it becomes bound to the thread.
@@ -590,7 +661,7 @@
   // (introduces deadlock on Linux).
   ::testing::FLAGS_gtest_death_test_style = "threadsafe";
 
-  scoped_ptr<Target> target(new Target());
+  std::unique_ptr<Target> target(new Target());
 
   // Main thread creates an arrow referencing the Target.
   Arrow arrow;
diff --git a/base/memory/weak_ptr_unittest.nc b/base/memory/weak_ptr_unittest.nc
index bad1c97..9b1226b 100644
--- a/base/memory/weak_ptr_unittest.nc
+++ b/base/memory/weak_ptr_unittest.nc
@@ -59,7 +59,7 @@
       SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST)  // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*const'"]
+#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST)  // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*'"]
 
 void WontCompile() {
   Producer f;
@@ -73,14 +73,14 @@
   WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST)  // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*const'"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST)  // [r"fatal error: cannot initialize a member subobject of type 'base::DerivedProducer \*' with an lvalue of type 'base::Producer \*'"]
 
 void WontCompile() {
   Producer f; 
   WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_HELPER_CAST)  // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNSAFE_HELPER_CAST)  // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*'"]
 
 void WontCompile() {
   DerivedProducer f;
@@ -94,14 +94,14 @@
   WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST)  // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST)  // [r"fatal error: cannot initialize a member subobject of type 'base::OtherDerivedProducer \*' with an lvalue of type 'base::DerivedProducer \*'"]
 
 void WontCompile() {
   DerivedProducer f;
   WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
 }
 
-#elif defined(NCTEST_UNRELATED_HELPER)  // [r"fatal error: cannot initialize a member subobject of type 'base::Unrelated \*' with an lvalue of type 'base::DerivedProducer \*const'"]
+#elif defined(NCTEST_UNRELATED_HELPER)  // [r"fatal error: cannot initialize a member subobject of type 'base::Unrelated \*' with an lvalue of type 'base::DerivedProducer \*'"]
 
 void WontCompile() {
   DerivedProducer f;
@@ -129,7 +129,7 @@
   WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
 }
 
-#elif defined(NCTEST_AMBIGUOUS_ANCESTORS)  // [r"fatal error: ambiguous conversion from derived class 'base::MultiplyDerivedProducer' to base class 'base::internal::SupportsWeakPtrBase':"]
+#elif defined(NCTEST_AMBIGUOUS_ANCESTORS)  // [r"fatal error: use of undeclared identifier 'AsWeakPtrImpl'"]
 
 void WontCompile() {
   MultiplyDerivedProducer f;
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index 9e3cdc9..bca1d52 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -8,7 +8,6 @@
 
 #include "base/location.h"
 #include "base/message_loop/message_loop.h"
-#include "base/metrics/histogram.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
@@ -18,7 +17,7 @@
 
 namespace {
 
-#ifndef NDEBUG
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
 // Delays larger than this are often bogus, and a warning should be emitted in
 // debug builds to warn developers.  http://crbug.com/450045
 const int kTaskDelayWarningThresholdInSeconds =
@@ -27,17 +26,26 @@
 
 // Returns true if MessagePump::ScheduleWork() must be called one
 // time for every task that is added to the MessageLoop incoming queue.
-#if defined(OS_ANDROID)
 bool AlwaysNotifyPump(MessageLoop::Type type) {
+#if defined(OS_ANDROID)
   // The Android UI message loop needs to get notified each time a task is
-  // added to the incoming queue.
+  // added
+  // to the incoming queue.
   return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
-}
 #else
-bool AlwaysNotifyPump(MessageLoop::Type /* type */) {
+  (void)type;  // Avoid an unused warning.
   return false;
-}
 #endif
+}
+
+TimeTicks CalculateDelayedRuntime(TimeDelta delay) {
+  TimeTicks delayed_run_time;
+  if (delay > TimeDelta())
+    delayed_run_time = TimeTicks::Now() + delay;
+  else
+    DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";
+  return delayed_run_time;
+}
 
 }  // namespace
 
@@ -60,7 +68,6 @@
       << "Requesting super-long task delay period of " << delay.InSeconds()
       << " seconds from here: " << from_here.ToString();
 
-  AutoLock locked(incoming_queue_lock_);
   PendingTask pending_task(
       from_here, task, CalculateDelayedRuntime(delay), nestable);
 #if defined(OS_WIN)
@@ -70,7 +77,6 @@
   // resolution on Windows is between 10 and 15ms.
   if (delay > TimeDelta() &&
       delay.InMilliseconds() < (2 * Time::kMinLowResolutionThresholdMs)) {
-    ++high_res_task_count_;
     pending_task.is_high_res = true;
   }
 #endif
@@ -99,7 +105,7 @@
     // incoming queue becomes nonempty we need to schedule it again.
     message_loop_scheduled_ = false;
   } else {
-    incoming_queue_.Swap(work_queue);
+    incoming_queue_.swap(*work_queue);
   }
   // Reset the count of high resolution tasks since our queue is now empty.
   int high_res_tasks = high_res_task_count_;
@@ -108,17 +114,25 @@
 }
 
 void IncomingTaskQueue::WillDestroyCurrentMessageLoop() {
-  AutoLock lock(incoming_queue_lock_);
+  base::subtle::AutoWriteLock lock(message_loop_lock_);
   message_loop_ = NULL;
 }
 
 void IncomingTaskQueue::StartScheduling() {
-  AutoLock lock(incoming_queue_lock_);
-  DCHECK(!is_ready_for_scheduling_);
-  DCHECK(!message_loop_scheduled_);
-  is_ready_for_scheduling_ = true;
-  if (!incoming_queue_.empty())
-    ScheduleWork();
+  bool schedule_work;
+  {
+    AutoLock lock(incoming_queue_lock_);
+    DCHECK(!is_ready_for_scheduling_);
+    DCHECK(!message_loop_scheduled_);
+    is_ready_for_scheduling_ = true;
+    schedule_work = !incoming_queue_.empty();
+  }
+  if (schedule_work) {
+    DCHECK(message_loop_);
+    // Don't need to lock |message_loop_lock_| here because this function is
+    // called by MessageLoop on its thread.
+    message_loop_->ScheduleWork();
+  }
 }
 
 IncomingTaskQueue::~IncomingTaskQueue() {
@@ -126,58 +140,60 @@
   DCHECK(!message_loop_);
 }
 
-TimeTicks IncomingTaskQueue::CalculateDelayedRuntime(TimeDelta delay) {
-  TimeTicks delayed_run_time;
-  if (delay > TimeDelta())
-    delayed_run_time = TimeTicks::Now() + delay;
-  else
-    DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";
-  return delayed_run_time;
-}
-
 bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
   // Warning: Don't try to short-circuit, and handle this thread's tasks more
   // directly, as it could starve handling of foreign threads.  Put every task
   // into this queue.
 
-  // This should only be called while the lock is taken.
-  incoming_queue_lock_.AssertAcquired();
+  // Ensures |message_loop_| isn't destroyed while running.
+  base::subtle::AutoReadLock hold_message_loop(message_loop_lock_);
 
   if (!message_loop_) {
     pending_task->task.Reset();
     return false;
   }
 
-  // Initialize the sequence number. The sequence number is used for delayed
-  // tasks (to facilitate FIFO sorting when two tasks have the same
-  // delayed_run_time value) and for identifying the task in about:tracing.
-  pending_task->sequence_num = next_sequence_num_++;
+  bool schedule_work = false;
+  {
+    AutoLock hold(incoming_queue_lock_);
 
-  message_loop_->task_annotator()->DidQueueTask("MessageLoop::PostTask",
-                                                *pending_task);
+#if defined(OS_WIN)
+    if (pending_task->is_high_res)
+      ++high_res_task_count_;
+#endif
 
-  bool was_empty = incoming_queue_.empty();
-  incoming_queue_.push(*pending_task);
-  pending_task->task.Reset();
+    // Initialize the sequence number. The sequence number is used for delayed
+    // tasks (to facilitate FIFO sorting when two tasks have the same
+    // delayed_run_time value) and for identifying the task in about:tracing.
+    pending_task->sequence_num = next_sequence_num_++;
 
-  if (is_ready_for_scheduling_ &&
-      (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
-    ScheduleWork();
+    message_loop_->task_annotator()->DidQueueTask("MessageLoop::PostTask",
+                                                  *pending_task);
+
+    bool was_empty = incoming_queue_.empty();
+    incoming_queue_.push(std::move(*pending_task));
+
+    if (is_ready_for_scheduling_ &&
+        (always_schedule_work_ || (!message_loop_scheduled_ && was_empty))) {
+      schedule_work = true;
+      // After we've scheduled the message loop, we do not need to do so again
+      // until we know it has processed all of the work in our queue and is
+      // waiting for more work again. The message loop will always attempt to
+      // reload from the incoming queue before waiting again so we clear this
+      // flag in ReloadWorkQueue().
+      message_loop_scheduled_ = true;
+    }
   }
 
-  return true;
-}
+  // Wake up the message loop and schedule work. This is done outside
+  // |incoming_queue_lock_| because signaling the message loop may cause this
+  // thread to be switched. If |incoming_queue_lock_| is held, any other thread
+  // that wants to post a task will be blocked until this thread switches back
+  // in and releases |incoming_queue_lock_|.
+  if (schedule_work)
+    message_loop_->ScheduleWork();
 
-void IncomingTaskQueue::ScheduleWork() {
-  DCHECK(is_ready_for_scheduling_);
-  // Wake up the message loop.
-  message_loop_->ScheduleWork();
-  // After we've scheduled the message loop, we do not need to do so again
-  // until we know it has processed all of the work in our queue and is
-  // waiting for more work again. The message loop will always attempt to
-  // reload from the incoming queue before waiting again so we clear this flag
-  // in ReloadWorkQueue().
-  message_loop_scheduled_ = true;
+  return true;
 }
 
 }  // namespace internal
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index e450aa1..aff71d2 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -10,6 +10,7 @@
 #include "base/memory/ref_counted.h"
 #include "base/pending_task.h"
 #include "base/synchronization/lock.h"
+#include "base/synchronization/read_write_lock.h"
 #include "base/time/time.h"
 
 namespace base {
@@ -62,9 +63,6 @@
   friend class RefCountedThreadSafe<IncomingTaskQueue>;
   virtual ~IncomingTaskQueue();
 
-  // Calculates the time at which a PendingTask should run.
-  TimeTicks CalculateDelayedRuntime(TimeDelta delay);
-
   // Adds a task to |incoming_queue_|. The caller retains ownership of
   // |pending_task|, but this function will reset the value of
   // |pending_task->task|. This is needed to ensure that the posting call stack
@@ -78,9 +76,14 @@
   // so that ReloadWorkQueue() completes in constant time.
   int high_res_task_count_;
 
-  // The lock that protects access to the members of this class.
+  // The lock that protects access to the members of this class, except
+  // |message_loop_|.
   base::Lock incoming_queue_lock_;
 
+  // Lock that protects |message_loop_| to prevent it from being deleted while a
+  // task is being posted.
+  base::subtle::ReadWriteLock message_loop_lock_;
+
   // An incoming queue of tasks that are acquired under a mutex for processing
   // on this instance's thread. These tasks have not yet been been pushed to
   // |message_loop_|.
diff --git a/base/message_loop/message_loop.cc b/base/message_loop/message_loop.cc
index e2b8bca..54369a9 100644
--- a/base/message_loop/message_loop.cc
+++ b/base/message_loop/message_loop.cc
@@ -5,19 +5,22 @@
 #include "base/message_loop/message_loop.h"
 
 #include <algorithm>
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
 #include "base/compiler_specific.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/message_loop/message_pump_default.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/run_loop.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/thread_id_name_manager.h"
 #include "base/threading/thread_local.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
 #include "base/trace_event/trace_event.h"
 #include "base/tracked_objects.h"
@@ -102,7 +105,7 @@
 }
 #endif  // !defined(OS_NACL_SFI)
 
-scoped_ptr<MessagePump> ReturnPump(scoped_ptr<MessagePump> pump) {
+std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
   return pump;
 }
 
@@ -119,6 +122,8 @@
 MessageLoop::DestructionObserver::~DestructionObserver() {
 }
 
+MessageLoop::NestingObserver::~NestingObserver() {}
+
 //------------------------------------------------------------------------------
 
 MessageLoop::MessageLoop(Type type)
@@ -126,7 +131,7 @@
   BindToCurrentThread();
 }
 
-MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
+MessageLoop::MessageLoop(std::unique_ptr<MessagePump> pump)
     : MessageLoop(TYPE_CUSTOM, Bind(&ReturnPump, Passed(&pump))) {
   BindToCurrentThread();
 }
@@ -205,7 +210,7 @@
 }
 
 // static
-scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
+std::unique_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
 // TODO(rvargas): Get rid of the OS guards.
 #if defined(USE_GLIB) && !defined(OS_NACL)
   typedef MessagePumpGlib MessagePumpForUI;
@@ -214,21 +219,22 @@
 #endif
 
 #if defined(OS_IOS) || defined(OS_MACOSX)
-#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(MessagePumpMac::Create())
 #elif defined(OS_NACL)
 // Currently NaCl doesn't have a UI MessageLoop.
 // TODO(abarth): Figure out if we need this.
-#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>()
 #else
-#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(new MessagePumpForUI())
 #endif
 
 #if defined(OS_MACOSX)
   // Use an OS native runloop on Mac to support timer coalescing.
-  #define MESSAGE_PUMP_DEFAULT \
-      scoped_ptr<MessagePump>(new MessagePumpCFRunLoop())
+#define MESSAGE_PUMP_DEFAULT \
+  std::unique_ptr<MessagePump>(new MessagePumpCFRunLoop())
 #else
-  #define MESSAGE_PUMP_DEFAULT scoped_ptr<MessagePump>(new MessagePumpDefault())
+#define MESSAGE_PUMP_DEFAULT \
+  std::unique_ptr<MessagePump>(new MessagePumpDefault())
 #endif
 
   if (type == MessageLoop::TYPE_UI) {
@@ -237,11 +243,11 @@
     return MESSAGE_PUMP_UI;
   }
   if (type == MessageLoop::TYPE_IO)
-    return scoped_ptr<MessagePump>(new MessagePumpForIO());
+    return std::unique_ptr<MessagePump>(new MessagePumpForIO());
 
 #if defined(OS_ANDROID)
   if (type == MessageLoop::TYPE_JAVA)
-    return scoped_ptr<MessagePump>(new MessagePumpForUI());
+    return std::unique_ptr<MessagePump>(new MessagePumpForUI());
 #endif
 
   DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
@@ -260,6 +266,16 @@
   destruction_observers_.RemoveObserver(destruction_observer);
 }
 
+void MessageLoop::AddNestingObserver(NestingObserver* observer) {
+  DCHECK_EQ(this, current());
+  nesting_observers_.AddObserver(observer);
+}
+
+void MessageLoop::RemoveNestingObserver(NestingObserver* observer) {
+  DCHECK_EQ(this, current());
+  nesting_observers_.RemoveObserver(observer);
+}
+
 void MessageLoop::PostTask(
     const tracked_objects::Location& from_here,
     const Closure& task) {
@@ -273,19 +289,6 @@
   task_runner_->PostDelayedTask(from_here, task, delay);
 }
 
-void MessageLoop::PostNonNestableTask(
-    const tracked_objects::Location& from_here,
-    const Closure& task) {
-  task_runner_->PostNonNestableTask(from_here, task);
-}
-
-void MessageLoop::PostNonNestableDelayedTask(
-    const tracked_objects::Location& from_here,
-    const Closure& task,
-    TimeDelta delay) {
-  task_runner_->PostNonNestableDelayedTask(from_here, task, delay);
-}
-
 void MessageLoop::Run() {
   DCHECK(pump_);
   RunLoop run_loop;
@@ -301,9 +304,9 @@
 void MessageLoop::QuitWhenIdle() {
   DCHECK_EQ(this, current());
   if (run_loop_) {
-    run_loop_->quit_when_idle_received_ = true;
+    run_loop_->QuitWhenIdle();
   } else {
-    NOTREACHED() << "Must be inside Run to call Quit";
+    NOTREACHED() << "Must be inside Run to call QuitWhenIdle";
   }
 }
 
@@ -374,9 +377,10 @@
 //------------------------------------------------------------------------------
 
 // static
-scoped_ptr<MessageLoop> MessageLoop::CreateUnbound(
-    Type type, MessagePumpFactoryCallback pump_factory) {
-  return make_scoped_ptr(new MessageLoop(type, pump_factory));
+std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(
+    Type type,
+    MessagePumpFactoryCallback pump_factory) {
+  return WrapUnique(new MessageLoop(type, pump_factory));
 }
 
 MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
@@ -386,18 +390,16 @@
       in_high_res_mode_(false),
 #endif
       nestable_tasks_allowed_(true),
-#if defined(OS_WIN)
-      os_modal_loop_(false),
-#endif  // OS_WIN
       pump_factory_(pump_factory),
       message_histogram_(NULL),
       run_loop_(NULL),
       incoming_task_queue_(new internal::IncomingTaskQueue(this)),
       unbound_task_runner_(
           new internal::MessageLoopTaskRunner(incoming_task_queue_)),
-      task_runner_(unbound_task_runner_) {
+      task_runner_(unbound_task_runner_),
+      thread_id_(kInvalidThreadId) {
   // If type is TYPE_CUSTOM non-null pump_factory must be given.
-  DCHECK_EQ(type_ == TYPE_CUSTOM, !pump_factory_.is_null());
+  DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
 }
 
 void MessageLoop::BindToCurrentThread() {
@@ -414,6 +416,22 @@
   unbound_task_runner_->BindToCurrentThread();
   unbound_task_runner_ = nullptr;
   SetThreadTaskRunnerHandle();
+  {
+    // Save the current thread's ID for potential use by other threads
+    // later from GetThreadName().
+    thread_id_ = PlatformThread::CurrentId();
+    subtle::MemoryBarrier();
+  }
+}
+
+std::string MessageLoop::GetThreadName() const {
+  if (thread_id_ == kInvalidThreadId) {
+    // |thread_id_| may already have been initialized but this thread might not
+    // have received the update yet.
+    subtle::MemoryBarrier();
+    DCHECK_NE(kInvalidThreadId, thread_id_);
+  }
+  return ThreadIdNameManager::GetInstance()->GetName(thread_id_);
 }
 
 void MessageLoop::SetTaskRunner(
@@ -435,17 +453,7 @@
 
 void MessageLoop::RunHandler() {
   DCHECK_EQ(this, current());
-
   StartHistogrammer();
-
-#if defined(OS_WIN)
-  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
-    static_cast<MessagePumpForUI*>(pump_.get())->
-        RunWithDispatcher(this, run_loop_->dispatcher_);
-    return;
-  }
-#endif
-
   pump_->Run(this);
 }
 
@@ -456,7 +464,8 @@
   if (deferred_non_nestable_work_queue_.empty())
     return false;
 
-  PendingTask pending_task = deferred_non_nestable_work_queue_.front();
+  PendingTask pending_task =
+      std::move(deferred_non_nestable_work_queue_.front());
   deferred_non_nestable_work_queue_.pop();
 
   RunTask(pending_task);
@@ -489,7 +498,7 @@
   nestable_tasks_allowed_ = true;
 }
 
-bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
+bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
   if (pending_task.nestable || run_loop_->run_depth_ == 1) {
     RunTask(pending_task);
     // Show that we ran a task (Note: a new one might arrive as a
@@ -499,25 +508,25 @@
 
   // We couldn't run the task now because we're in a nested message loop
   // and the task isn't nestable.
-  deferred_non_nestable_work_queue_.push(pending_task);
+  deferred_non_nestable_work_queue_.push(std::move(pending_task));
   return false;
 }
 
-void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
+void MessageLoop::AddToDelayedWorkQueue(PendingTask pending_task) {
   // Move to the delayed work queue.
-  delayed_work_queue_.push(pending_task);
+  delayed_work_queue_.push(std::move(pending_task));
 }
 
 bool MessageLoop::DeletePendingTasks() {
   bool did_work = !work_queue_.empty();
   while (!work_queue_.empty()) {
-    PendingTask pending_task = work_queue_.front();
+    PendingTask pending_task = std::move(work_queue_.front());
     work_queue_.pop();
     if (!pending_task.delayed_run_time.is_null()) {
       // We want to delete delayed tasks in the same order in which they would
       // normally be deleted in case of any funny dependencies between delayed
       // tasks.
-      AddToDelayedWorkQueue(pending_task);
+      AddToDelayedWorkQueue(std::move(pending_task));
     }
   }
   did_work |= !deferred_non_nestable_work_queue_.empty();
@@ -556,6 +565,12 @@
   pump_->ScheduleWork();
 }
 
+#if defined(OS_WIN)
+bool MessageLoop::MessagePumpWasSignaled() {
+  return pump_->WasSignaled();
+}
+#endif
+
 //------------------------------------------------------------------------------
 // Method and data for histogramming events and actions taken by each instance
 // on each thread.
@@ -564,13 +579,12 @@
 #if !defined(OS_NACL)  // NaCl build has no metrics code.
   if (enable_histogrammer_ && !message_histogram_
       && StatisticsRecorder::IsActive()) {
-    DCHECK(!thread_name_.empty());
+    std::string thread_name = GetThreadName();
+    DCHECK(!thread_name.empty());
     message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
-        "MsgLoop:" + thread_name_,
-        kLeastNonZeroMessageId, kMaxMessageId,
+        "MsgLoop:" + thread_name, kLeastNonZeroMessageId, kMaxMessageId,
         kNumberOfDistinctMessagesDisplayed,
-        HistogramBase::kHexRangePrintingFlag,
-        event_descriptions_);
+        HistogramBase::kHexRangePrintingFlag, event_descriptions_);
   }
 #endif
 }
@@ -582,6 +596,11 @@
 #endif
 }
 
+void MessageLoop::NotifyBeginNestedLoop() {
+  FOR_EACH_OBSERVER(NestingObserver, nesting_observers_,
+                    OnBeginNestedMessageLoop());
+}
+
 bool MessageLoop::DoWork() {
   if (!nestable_tasks_allowed_) {
     // Task can't be executed right now.
@@ -595,15 +614,17 @@
 
     // Execute oldest task.
     do {
-      PendingTask pending_task = work_queue_.front();
+      PendingTask pending_task = std::move(work_queue_.front());
       work_queue_.pop();
       if (!pending_task.delayed_run_time.is_null()) {
-        AddToDelayedWorkQueue(pending_task);
+        int sequence_num = pending_task.sequence_num;
+        TimeTicks delayed_run_time = pending_task.delayed_run_time;
+        AddToDelayedWorkQueue(std::move(pending_task));
         // If we changed the topmost task, then it is time to reschedule.
-        if (delayed_work_queue_.top().task.Equals(pending_task.task))
-          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
+        if (delayed_work_queue_.top().sequence_num == sequence_num)
+          pump_->ScheduleDelayedWork(delayed_run_time);
       } else {
-        if (DeferOrRunPendingTask(pending_task))
+        if (DeferOrRunPendingTask(std::move(pending_task)))
           return true;
       }
     } while (!work_queue_.empty());
@@ -635,13 +656,14 @@
     }
   }
 
-  PendingTask pending_task = delayed_work_queue_.top();
+  PendingTask pending_task =
+      std::move(const_cast<PendingTask&>(delayed_work_queue_.top()));
   delayed_work_queue_.pop();
 
   if (!delayed_work_queue_.empty())
     *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
 
-  return DeferOrRunPendingTask(pending_task);
+  return DeferOrRunPendingTask(std::move(pending_task));
 }
 
 bool MessageLoop::DoIdleWork() {
@@ -669,20 +691,23 @@
 void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                      void(*deleter)(const void*),
                                      const void* object) {
-  PostNonNestableTask(from_here, Bind(deleter, object));
+  task_runner()->PostNonNestableTask(from_here, Bind(deleter, object));
 }
 
 void MessageLoop::ReleaseSoonInternal(
     const tracked_objects::Location& from_here,
     void(*releaser)(const void*),
     const void* object) {
-  PostNonNestableTask(from_here, Bind(releaser, object));
+  task_runner()->PostNonNestableTask(from_here, Bind(releaser, object));
 }
 
 #if !defined(OS_NACL)
 //------------------------------------------------------------------------------
 // MessageLoopForUI
 
+MessageLoopForUI::MessageLoopForUI(std::unique_ptr<MessagePump> pump)
+    : MessageLoop(TYPE_UI, Bind(&ReturnPump, Passed(&pump))) {}
+
 #if defined(OS_ANDROID)
 void MessageLoopForUI::Start() {
   // No Histogram support for UI message loop as it is managed by Java side
@@ -720,15 +745,6 @@
 MessageLoopForIO::MessageLoopForIO() : MessageLoop(TYPE_IO) {}
 
 #if !defined(OS_NACL_SFI)
-void MessageLoopForIO::AddIOObserver(
-    MessageLoopForIO::IOObserver* io_observer) {
-  ToPumpIO(pump_.get())->AddIOObserver(io_observer);
-}
-
-void MessageLoopForIO::RemoveIOObserver(
-    MessageLoopForIO::IOObserver* io_observer) {
-  ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
-}
 
 #if defined(OS_WIN)
 void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index e78b704..ac522cf 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -5,6 +5,7 @@
 #ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
 #define BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
 
+#include <memory>
 #include <queue>
 #include <string>
 
@@ -15,7 +16,6 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/incoming_task_queue.h"
 #include "base/message_loop/message_loop_task_runner.h"
 #include "base/message_loop/message_pump.h"
@@ -115,7 +115,7 @@
   explicit MessageLoop(Type type = TYPE_DEFAULT);
   // Creates a TYPE_CUSTOM MessageLoop with the supplied MessagePump, which must
   // be non-NULL.
-  explicit MessageLoop(scoped_ptr<MessagePump> pump);
+  explicit MessageLoop(std::unique_ptr<MessagePump> pump);
 
   ~MessageLoop() override;
 
@@ -124,7 +124,7 @@
 
   static void EnableHistogrammer(bool enable_histogrammer);
 
-  typedef scoped_ptr<MessagePump> (MessagePumpFactory)();
+  typedef std::unique_ptr<MessagePump>(MessagePumpFactory)();
   // Uses the given base::MessagePumpForUIFactory to override the default
   // MessagePump implementation for 'TYPE_UI'. Returns true if the factory
   // was successfully registered.
@@ -132,7 +132,8 @@
 
   // Creates the default MessagePump based on |type|. Caller owns return
   // value.
-  static scoped_ptr<MessagePump> CreateMessagePumpForType(Type type);
+  static std::unique_ptr<MessagePump> CreateMessagePumpForType(Type type);
+
   // A DestructionObserver is notified when the current MessageLoop is being
   // destroyed.  These observers are notified prior to MessageLoop::current()
   // being changed to return NULL.  This gives interested parties the chance to
@@ -157,6 +158,19 @@
   // DestructionObserver is receiving a notification callback.
   void RemoveDestructionObserver(DestructionObserver* destruction_observer);
 
+  // A NestingObserver is notified when a nested message loop begins. The
+  // observers are notified before the first task is processed.
+  class BASE_EXPORT NestingObserver {
+   public:
+    virtual void OnBeginNestedMessageLoop() = 0;
+
+   protected:
+    virtual ~NestingObserver();
+  };
+
+  void AddNestingObserver(NestingObserver* observer);
+  void RemoveNestingObserver(NestingObserver* observer);
+
   // NOTE: Deprecated; prefer task_runner() and the TaskRunner interfaces.
   // TODO(skyostil): Remove these functions (crbug.com/465354).
   //
@@ -186,13 +200,6 @@
                        const Closure& task,
                        TimeDelta delay);
 
-  void PostNonNestableTask(const tracked_objects::Location& from_here,
-                           const Closure& task);
-
-  void PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  const Closure& task,
-                                  TimeDelta delay);
-
   // A variant on PostTask that deletes the given object.  This is useful
   // if the object needs to live until the next run of the MessageLoop (for
   // example, deleting a RenderProcessHost from within an IPC callback is not
@@ -284,12 +291,10 @@
   // Returns the type passed to the constructor.
   Type type() const { return type_; }
 
-  // Optional call to connect the thread name with this loop.
-  void set_thread_name(const std::string& thread_name) {
-    DCHECK(thread_name_.empty()) << "Should not rename this thread!";
-    thread_name_ = thread_name;
-  }
-  const std::string& thread_name() const { return thread_name_; }
+  // Returns the name of the thread this message loop is bound to.
+  // This function is only valid when this message loop is running and
+  // BindToCurrentThread has already been called.
+  std::string GetThreadName() const;
 
   // Gets the TaskRunner associated with this message loop.
   const scoped_refptr<SingleThreadTaskRunner>& task_runner() {
@@ -368,16 +373,6 @@
   void AddTaskObserver(TaskObserver* task_observer);
   void RemoveTaskObserver(TaskObserver* task_observer);
 
-#if defined(OS_WIN)
-  void set_os_modal_loop(bool os_modal_loop) {
-    os_modal_loop_ = os_modal_loop;
-  }
-
-  bool os_modal_loop() const {
-    return os_modal_loop_;
-  }
-#endif  // OS_WIN
-
   // Can only be called from the thread that owns the MessageLoop.
   bool is_running() const;
 
@@ -395,9 +390,31 @@
   // Runs the specified PendingTask.
   void RunTask(const PendingTask& pending_task);
 
+#if defined(OS_WIN)
+  // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
+  // has been investigated.
+  // This should be used for diagnostic only. If message pump wake-up mechanism
+  // is based on auto-reset event this call would reset the event to unset
+  // state.
+  bool MessagePumpWasSignaled();
+#endif
+
   //----------------------------------------------------------------------------
  protected:
-  scoped_ptr<MessagePump> pump_;
+  std::unique_ptr<MessagePump> pump_;
+
+  using MessagePumpFactoryCallback = Callback<std::unique_ptr<MessagePump>()>;
+
+  // Common protected constructor. Other constructors delegate the
+  // initialization to this constructor.
+  // A subclass can invoke this constructor to create a message_loop of a
+  // specific type with a custom loop. The implementation does not call
+  // BindToCurrentThread. If this constructor is invoked directly by a subclass,
+  // then the subclass must subsequently bind the message loop.
+  MessageLoop(Type type, MessagePumpFactoryCallback pump_factory);
+
+  // Configure various members and bind this message loop to the current thread.
+  void BindToCurrentThread();
 
  private:
   friend class RunLoop;
@@ -406,8 +423,6 @@
   friend class Thread;
   FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
 
-  using MessagePumpFactoryCallback = Callback<scoped_ptr<MessagePump>()>;
-
   // Creates a MessageLoop without binding to a thread.
   // If |type| is TYPE_CUSTOM non-null |pump_factory| must be also given
   // to create a message pump for this message loop.  Otherwise a default
@@ -419,17 +434,10 @@
   // thread the message loop runs on, before calling Run().
   // Before BindToCurrentThread() is called, only Post*Task() functions can
   // be called on the message loop.
-  static scoped_ptr<MessageLoop> CreateUnbound(
+  static std::unique_ptr<MessageLoop> CreateUnbound(
       Type type,
       MessagePumpFactoryCallback pump_factory);
 
-  // Common private constructor. Other constructors delegate the initialization
-  // to this constructor.
-  MessageLoop(Type type, MessagePumpFactoryCallback pump_factory);
-
-  // Configure various members and bind this message loop to the current thread.
-  void BindToCurrentThread();
-
   // Sets the ThreadTaskRunnerHandle for the current thread to point to the
   // task runner for this message loop.
   void SetThreadTaskRunnerHandle();
@@ -442,10 +450,10 @@
 
   // Calls RunTask or queues the pending_task on the deferred task list if it
   // cannot be run right now.  Returns true if the task was run.
-  bool DeferOrRunPendingTask(const PendingTask& pending_task);
+  bool DeferOrRunPendingTask(PendingTask pending_task);
 
   // Adds the pending task to delayed_work_queue_.
-  void AddToDelayedWorkQueue(const PendingTask& pending_task);
+  void AddToDelayedWorkQueue(PendingTask pending_task);
 
   // Delete tasks that haven't run yet without running them.  Used in the
   // destructor to make sure all the task's destructors get called.  Returns
@@ -469,6 +477,9 @@
   // If message_histogram_ is NULL, this is a no-op.
   void HistogramEvent(int event);
 
+  // Notify observers that a nested message loop is starting.
+  void NotifyBeginNestedLoop();
+
   // MessagePump::Delegate methods:
   bool DoWork() override;
   bool DoDelayedWork(TimeTicks* next_delayed_work_time) override;
@@ -503,21 +514,16 @@
 
   ObserverList<DestructionObserver> destruction_observers_;
 
+  ObserverList<NestingObserver> nesting_observers_;
+
   // A recursion block that prevents accidentally running additional tasks when
   // insider a (accidentally induced?) nested message pump.
   bool nestable_tasks_allowed_;
 
-#if defined(OS_WIN)
-  // Should be set to true before calling Windows APIs like TrackPopupMenu, etc.
-  // which enter a modal message loop.
-  bool os_modal_loop_;
-#endif
-
   // pump_factory_.Run() is called to create a message pump for this loop
   // if type_ is TYPE_CUSTOM and pump_ is null.
   MessagePumpFactoryCallback pump_factory_;
 
-  std::string thread_name_;
   // A profiling histogram showing the counts of various messages and events.
   HistogramBase* message_histogram_;
 
@@ -534,7 +540,10 @@
 
   // The task runner associated with this message loop.
   scoped_refptr<SingleThreadTaskRunner> task_runner_;
-  scoped_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+  std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+
+  // Id of the thread this message loop is bound to.
+  PlatformThreadId thread_id_;
 
   template <class T, class R> friend class base::subtle::DeleteHelperInternal;
   template <class T, class R> friend class base::subtle::ReleaseHelperInternal;
@@ -563,17 +572,19 @@
   MessageLoopForUI() : MessageLoop(TYPE_UI) {
   }
 
+  explicit MessageLoopForUI(std::unique_ptr<MessagePump> pump);
+
   // Returns the MessageLoopForUI of the current thread.
   static MessageLoopForUI* current() {
     MessageLoop* loop = MessageLoop::current();
     DCHECK(loop);
-    DCHECK_EQ(MessageLoop::TYPE_UI, loop->type());
+    DCHECK(loop->IsType(MessageLoop::TYPE_UI));
     return static_cast<MessageLoopForUI*>(loop);
   }
 
   static bool IsCurrent() {
     MessageLoop* loop = MessageLoop::current();
-    return loop && loop->type() == MessageLoop::TYPE_UI;
+    return loop && loop->IsType(MessageLoop::TYPE_UI);
   }
 
 #if defined(OS_IOS)
@@ -623,6 +634,9 @@
   // Returns the MessageLoopForIO of the current thread.
   static MessageLoopForIO* current() {
     MessageLoop* loop = MessageLoop::current();
+    DCHECK(loop) << "Can't call MessageLoopForIO::current() when no message "
+                    "loop was created for this thread. Use "
+                    " MessageLoop::current() or MessageLoopForIO::IsCurrent().";
     DCHECK_EQ(MessageLoop::TYPE_IO, loop->type());
     return static_cast<MessageLoopForIO*>(loop);
   }
@@ -637,12 +651,10 @@
 #if defined(OS_WIN)
   typedef MessagePumpForIO::IOHandler IOHandler;
   typedef MessagePumpForIO::IOContext IOContext;
-  typedef MessagePumpForIO::IOObserver IOObserver;
 #elif defined(OS_IOS)
   typedef MessagePumpIOSForIO::Watcher Watcher;
   typedef MessagePumpIOSForIO::FileDescriptorWatcher
       FileDescriptorWatcher;
-  typedef MessagePumpIOSForIO::IOObserver IOObserver;
 
   enum Mode {
     WATCH_READ = MessagePumpIOSForIO::WATCH_READ,
@@ -653,7 +665,6 @@
   typedef MessagePumpLibevent::Watcher Watcher;
   typedef MessagePumpLibevent::FileDescriptorWatcher
       FileDescriptorWatcher;
-  typedef MessagePumpLibevent::IOObserver IOObserver;
 
   enum Mode {
     WATCH_READ = MessagePumpLibevent::WATCH_READ,
@@ -662,9 +673,6 @@
   };
 #endif
 
-  void AddIOObserver(IOObserver* io_observer);
-  void RemoveIOObserver(IOObserver* io_observer);
-
 #if defined(OS_WIN)
   // Please see MessagePumpWin for definitions of these methods.
   void RegisterIOHandler(HANDLE file, IOHandler* handler);
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
index 0442e7c..cabd250 100644
--- a/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -4,14 +4,18 @@
 
 #include "base/message_loop/message_loop_task_runner.h"
 
+#include <memory>
+
 #include "base/atomic_sequence_num.h"
 #include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/debug/leak_annotations.h"
 #include "base/message_loop/message_loop.h"
 #include "base/message_loop/message_loop_task_runner.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
 
@@ -22,7 +26,8 @@
   MessageLoopTaskRunnerTest()
       : current_loop_(new MessageLoop()),
         task_thread_("task_thread"),
-        thread_sync_(true, false) {}
+        thread_sync_(WaitableEvent::ResetPolicy::MANUAL,
+                     WaitableEvent::InitialState::NOT_SIGNALED) {}
 
   void DeleteCurrentMessageLoop() { current_loop_.reset(); }
 
@@ -33,7 +38,7 @@
     task_thread_.Start();
 
     // Allow us to pause the |task_thread_|'s MessageLoop.
-    task_thread_.message_loop()->PostTask(
+    task_thread_.message_loop()->task_runner()->PostTask(
         FROM_HERE, Bind(&MessageLoopTaskRunnerTest::BlockTaskThreadHelper,
                         Unretained(this)));
   }
@@ -87,7 +92,7 @@
 
   static StaticAtomicSequenceNumber g_order;
 
-  scoped_ptr<MessageLoop> current_loop_;
+  std::unique_ptr<MessageLoop> current_loop_;
   Thread task_thread_;
 
  private:
@@ -197,6 +202,8 @@
 }
 
 TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
+  // Annotate the scope as having memory leaks to suppress heapchecker reports.
+  ANNOTATE_SCOPED_MEMORY_LEAK;
   MessageLoop* task_run_on = NULL;
   MessageLoop* task_deleted_on = NULL;
   int task_delete_order = -1;
@@ -253,7 +260,8 @@
   }
 
   void Quit() const {
-    loop_.PostTask(FROM_HERE, MessageLoop::QuitWhenIdleClosure());
+    loop_.task_runner()->PostTask(FROM_HERE,
+                                  MessageLoop::QuitWhenIdleClosure());
   }
 
   void AssertOnIOThread() const {
@@ -300,8 +308,8 @@
     MessageLoopTaskRunnerThreadingTest* test_;
   };
 
-  scoped_ptr<Thread> io_thread_;
-  scoped_ptr<Thread> file_thread_;
+  std::unique_ptr<Thread> io_thread_;
+  std::unique_ptr<Thread> file_thread_;
 
  private:
   mutable MessageLoop loop_;
@@ -309,25 +317,25 @@
 
 TEST_F(MessageLoopTaskRunnerThreadingTest, Release) {
   EXPECT_TRUE(io_thread_->task_runner()->ReleaseSoon(FROM_HERE, this));
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 }
 
 TEST_F(MessageLoopTaskRunnerThreadingTest, Delete) {
   DeletedOnFile* deleted_on_file = new DeletedOnFile(this);
   EXPECT_TRUE(
       file_thread_->task_runner()->DeleteSoon(FROM_HERE, deleted_on_file));
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 }
 
 TEST_F(MessageLoopTaskRunnerThreadingTest, PostTask) {
   EXPECT_TRUE(file_thread_->task_runner()->PostTask(
       FROM_HERE, Bind(&MessageLoopTaskRunnerThreadingTest::BasicFunction,
                       Unretained(this))));
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 }
 
 TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadExits) {
-  scoped_ptr<Thread> test_thread(
+  std::unique_ptr<Thread> test_thread(
       new Thread("MessageLoopTaskRunnerThreadingTest_Dummy"));
   test_thread->Start();
   scoped_refptr<SingleThreadTaskRunner> task_runner =
@@ -342,7 +350,7 @@
 TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadIsDeleted) {
   scoped_refptr<SingleThreadTaskRunner> task_runner;
   {
-    scoped_ptr<Thread> test_thread(
+    std::unique_ptr<Thread> test_thread(
         new Thread("MessageLoopTaskRunnerThreadingTest_Dummy"));
     test_thread->Start();
     task_runner = test_thread->task_runner();
diff --git a/base/message_loop/message_loop_test.cc b/base/message_loop/message_loop_test.cc
index ac50d64..1ab946f 100644
--- a/base/message_loop/message_loop_test.cc
+++ b/base/message_loop/message_loop_test.cc
@@ -12,6 +12,7 @@
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread.h"
 
@@ -91,37 +92,37 @@
 }  // namespace
 
 void RunTest_PostTask(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
   // Add tests to message loop
   scoped_refptr<Foo> foo(new Foo());
   std::string a("a"), b("b"), c("c"), d("d");
-  MessageLoop::current()->PostTask(FROM_HERE, Bind(
-      &Foo::Test0, foo.get()));
-  MessageLoop::current()->PostTask(FROM_HERE, Bind(
-    &Foo::Test1ConstRef, foo.get(), a));
-  MessageLoop::current()->PostTask(FROM_HERE, Bind(
-      &Foo::Test1Ptr, foo.get(), &b));
-  MessageLoop::current()->PostTask(FROM_HERE, Bind(
-      &Foo::Test1Int, foo.get(), 100));
-  MessageLoop::current()->PostTask(FROM_HERE, Bind(
-      &Foo::Test2Ptr, foo.get(), &a, &c));
-  MessageLoop::current()->PostTask(FROM_HERE, Bind(
-      &Foo::Test2Mixed, foo.get(), a, &d));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&Foo::Test0, foo.get()));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test1ConstRef, foo.get(), a));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test1Ptr, foo.get(), &b));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test1Int, foo.get(), 100));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test2Ptr, foo.get(), &a, &c));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&Foo::Test2Mixed, foo.get(), a, &d));
   // After all tests, post a message that will shut down the message loop
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE,
       Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
 
   // Now kick things off
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 
   EXPECT_EQ(foo->test_count(), 105);
   EXPECT_EQ(foo->result(), "abacad");
 }
 
 void RunTest_PostDelayedTask_Basic(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that PostDelayedTask results in a delayed task.
@@ -131,12 +132,11 @@
   int num_tasks = 1;
   Time run_time;
 
-  loop.PostDelayedTask(
-      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
-      kDelay);
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
 
   Time time_before_run = Time::Now();
-  loop.Run();
+  RunLoop().Run();
   Time time_after_run = Time::Now();
 
   EXPECT_EQ(0, num_tasks);
@@ -144,32 +144,30 @@
 }
 
 void RunTest_PostDelayedTask_InDelayOrder(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that two tasks with different delays run in the right order.
   int num_tasks = 2;
   Time run_time1, run_time2;
 
-  loop.PostDelayedTask(
-      FROM_HERE,
-      Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
       TimeDelta::FromMilliseconds(200));
   // If we get a large pause in execution (due to a context switch) here, this
   // test could fail.
-  loop.PostDelayedTask(
-      FROM_HERE,
-      Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
       TimeDelta::FromMilliseconds(10));
 
-  loop.Run();
+  RunLoop().Run();
   EXPECT_EQ(0, num_tasks);
 
   EXPECT_TRUE(run_time2 < run_time1);
 }
 
 void RunTest_PostDelayedTask_InPostOrder(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that two tasks with the same delay run in the order in which they
@@ -185,21 +183,19 @@
   int num_tasks = 2;
   Time run_time1, run_time2;
 
-  loop.PostDelayedTask(
-      FROM_HERE,
-      Bind(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
-  loop.PostDelayedTask(
-      FROM_HERE,
-      Bind(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
 
-  loop.Run();
+  RunLoop().Run();
   EXPECT_EQ(0, num_tasks);
 
   EXPECT_TRUE(run_time1 < run_time2);
 }
 
 void RunTest_PostDelayedTask_InPostOrder_2(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that a delayed task still runs after a normal tasks even if the
@@ -210,14 +206,13 @@
   int num_tasks = 2;
   Time run_time;
 
-  loop.PostTask(FROM_HERE, Bind(&SlowFunc, kPause, &num_tasks));
-  loop.PostDelayedTask(
-      FROM_HERE,
-      Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
+  loop.task_runner()->PostTask(FROM_HERE, Bind(&SlowFunc, kPause, &num_tasks));
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time, &num_tasks),
       TimeDelta::FromMilliseconds(10));
 
   Time time_before_run = Time::Now();
-  loop.Run();
+  RunLoop().Run();
   Time time_after_run = Time::Now();
 
   EXPECT_EQ(0, num_tasks);
@@ -226,7 +221,7 @@
 }
 
 void RunTest_PostDelayedTask_InPostOrder_3(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that a delayed task still runs after a pile of normal tasks.  The key
@@ -240,21 +235,21 @@
 
   // Clutter the ML with tasks.
   for (int i = 1; i < num_tasks; ++i)
-    loop.PostTask(FROM_HERE,
-                  Bind(&RecordRunTimeFunc, &run_time1, &num_tasks));
+    loop.task_runner()->PostTask(
+        FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks));
 
-  loop.PostDelayedTask(
+  loop.task_runner()->PostDelayedTask(
       FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
       TimeDelta::FromMilliseconds(1));
 
-  loop.Run();
+  RunLoop().Run();
   EXPECT_EQ(0, num_tasks);
 
   EXPECT_TRUE(run_time2 > run_time1);
 }
 
 void RunTest_PostDelayedTask_SharedTimer(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that the interval of the timer, used to run the next delayed task, is
@@ -265,18 +260,16 @@
   int num_tasks = 1;
   Time run_time1, run_time2;
 
-  loop.PostDelayedTask(
-      FROM_HERE,
-      Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time1, &num_tasks),
       TimeDelta::FromSeconds(1000));
-  loop.PostDelayedTask(
-      FROM_HERE,
-      Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
+  loop.task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&RecordRunTimeFunc, &run_time2, &num_tasks),
       TimeDelta::FromMilliseconds(10));
 
   Time start_time = Time::Now();
 
-  loop.Run();
+  RunLoop().Run();
   EXPECT_EQ(0, num_tasks);
 
   // Ensure that we ran in far less time than the slower timer.
@@ -309,7 +302,7 @@
   ~RecordDeletionProbe() {
     *was_deleted_ = true;
     if (post_on_delete_.get())
-      MessageLoop::current()->PostTask(
+      MessageLoop::current()->task_runner()->PostTask(
           FROM_HERE, Bind(&RecordDeletionProbe::Run, post_on_delete_.get()));
   }
 
@@ -321,15 +314,15 @@
   bool a_was_deleted = false;
   bool b_was_deleted = false;
   {
-    scoped_ptr<MessagePump> pump(factory());
+    std::unique_ptr<MessagePump> pump(factory());
     MessageLoop loop(std::move(pump));
-    loop.PostTask(
+    loop.task_runner()->PostTask(
         FROM_HERE, Bind(&RecordDeletionProbe::Run,
-                              new RecordDeletionProbe(NULL, &a_was_deleted)));
+                        new RecordDeletionProbe(NULL, &a_was_deleted)));
     // TODO(ajwong): Do we really need 1000ms here?
-    loop.PostDelayedTask(
+    loop.task_runner()->PostDelayedTask(
         FROM_HERE, Bind(&RecordDeletionProbe::Run,
-                              new RecordDeletionProbe(NULL, &b_was_deleted)),
+                        new RecordDeletionProbe(NULL, &b_was_deleted)),
         TimeDelta::FromMilliseconds(1000));
   }
   EXPECT_TRUE(a_was_deleted);
@@ -341,14 +334,14 @@
   bool b_was_deleted = false;
   bool c_was_deleted = false;
   {
-    scoped_ptr<MessagePump> pump(factory());
+    std::unique_ptr<MessagePump> pump(factory());
     MessageLoop loop(std::move(pump));
     // The scoped_refptr for each of the below is held either by the chained
     // RecordDeletionProbe, or the bound RecordDeletionProbe::Run() callback.
     RecordDeletionProbe* a = new RecordDeletionProbe(NULL, &a_was_deleted);
     RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
     RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
-    loop.PostTask(FROM_HERE, Bind(&RecordDeletionProbe::Run, c));
+    loop.task_runner()->PostTask(FROM_HERE, Bind(&RecordDeletionProbe::Run, c));
   }
   EXPECT_TRUE(a_was_deleted);
   EXPECT_TRUE(b_was_deleted);
@@ -358,26 +351,88 @@
 void NestingFunc(int* depth) {
   if (*depth > 0) {
     *depth -= 1;
-    MessageLoop::current()->PostTask(FROM_HERE,
-                                     Bind(&NestingFunc, depth));
+    MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                    Bind(&NestingFunc, depth));
 
     MessageLoop::current()->SetNestableTasksAllowed(true);
-    MessageLoop::current()->Run();
+    RunLoop().Run();
   }
   MessageLoop::current()->QuitWhenIdle();
 }
 
 void RunTest_Nesting(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   int depth = 100;
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&NestingFunc, &depth));
-  MessageLoop::current()->Run();
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&NestingFunc, &depth));
+  RunLoop().Run();
   EXPECT_EQ(depth, 0);
 }
 
+// A NestingObserver that tracks the number of nested message loop starts it
+// has seen.
+class TestNestingObserver : public MessageLoop::NestingObserver {
+ public:
+  TestNestingObserver() {}
+  ~TestNestingObserver() override {}
+
+  int begin_nested_loop_count() const { return begin_nested_loop_count_; }
+
+  // MessageLoop::NestingObserver:
+  void OnBeginNestedMessageLoop() override { begin_nested_loop_count_++; }
+
+ private:
+  int begin_nested_loop_count_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(TestNestingObserver);
+};
+
+void ExpectOneBeginNestedLoop(TestNestingObserver* observer) {
+  EXPECT_EQ(1, observer->begin_nested_loop_count());
+}
+
+// Starts a nested message loop.
+void RunNestedLoop(TestNestingObserver* observer,
+                   const Closure& quit_outer_loop) {
+  // The nested loop hasn't started yet.
+  EXPECT_EQ(0, observer->begin_nested_loop_count());
+
+  MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
+  RunLoop nested_loop;
+  // Verify that by the time the first task is run the observer has seen the
+  // message loop begin.
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&ExpectOneBeginNestedLoop, observer));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop.QuitClosure());
+  nested_loop.Run();
+
+  // Quitting message loops doesn't change the begin count.
+  EXPECT_EQ(1, observer->begin_nested_loop_count());
+
+  quit_outer_loop.Run();
+}
+
+// Tests that a NestingObserver is notified when a nested message loop begins.
+void RunTest_NestingObserver(MessagePumpFactory factory) {
+  std::unique_ptr<MessagePump> pump(factory());
+  MessageLoop outer_loop(std::move(pump));
+
+  // Observe the outer loop for nested message loops beginning.
+  TestNestingObserver nesting_observer;
+  outer_loop.AddNestingObserver(&nesting_observer);
+
+  // Post a task that runs a nested message loop.
+  outer_loop.task_runner()->PostTask(FROM_HERE,
+                                     Bind(&RunNestedLoop, &nesting_observer,
+                                          outer_loop.QuitWhenIdleClosure()));
+  RunLoop().Run();
+
+  outer_loop.RemoveNestingObserver(&nesting_observer);
+}
+
 enum TaskType {
   MESSAGEBOX,
   ENDDIALOG,
@@ -463,7 +518,7 @@
   if (depth > 0) {
     if (is_reentrant)
       MessageLoop::current()->SetNestableTasksAllowed(true);
-    MessageLoop::current()->PostTask(
+    MessageLoop::current()->task_runner()->PostTask(
         FROM_HERE,
         Bind(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
   }
@@ -476,22 +531,19 @@
   order->RecordEnd(QUITMESSAGELOOP, cookie);
 }
 void RunTest_RecursiveDenial1(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
   TaskList order;
-  MessageLoop::current()->PostTask(
-      FROM_HERE,
-      Bind(&RecursiveFunc, &order, 1, 2, false));
-  MessageLoop::current()->PostTask(
-      FROM_HERE,
-      Bind(&RecursiveFunc, &order, 2, 2, false));
-  MessageLoop::current()->PostTask(
-      FROM_HERE,
-      Bind(&QuitFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, false));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, false));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&QuitFunc, &order, 3));
 
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 
   // FIFO order.
   ASSERT_EQ(14U, order.Size());
@@ -523,25 +575,21 @@
 }
 
 void RunTest_RecursiveDenial3(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
   TaskList order;
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveSlowFunc, &order, 1, 2, false));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveSlowFunc, &order, 2, 2, false));
-  MessageLoop::current()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&OrderedFunc, &order, 3),
-      TimeDelta::FromMilliseconds(5));
-  MessageLoop::current()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&QuitFunc, &order, 4),
-      TimeDelta::FromMilliseconds(5));
+  MessageLoop::current()->task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 3), TimeDelta::FromMilliseconds(5));
+  MessageLoop::current()->task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&QuitFunc, &order, 4), TimeDelta::FromMilliseconds(5));
 
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 
   // FIFO order.
   ASSERT_EQ(16U, order.Size());
@@ -564,18 +612,18 @@
 }
 
 void RunTest_RecursiveSupport1(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveFunc, &order, 1, 2, true));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&RecursiveFunc, &order, 2, 2, true));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&QuitFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&QuitFunc, &order, 3));
 
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 
   // FIFO order.
   ASSERT_EQ(14U, order.Size());
@@ -597,19 +645,20 @@
 
 // Tests that non nestable tasks run in FIFO if there are no nested loops.
 void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
 
-  MessageLoop::current()->PostNonNestableTask(
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
       FROM_HERE,
       Bind(&OrderedFunc, &order, 1));
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&OrderedFunc, &order, 2));
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&QuitFunc, &order, 3));
-  MessageLoop::current()->Run();
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&QuitFunc, &order, 3));
+  RunLoop().Run();
 
   // FIFO order.
   ASSERT_EQ(6U, order.Size());
@@ -637,45 +686,32 @@
 }
 
 // Tests that non nestable tasks don't run when there's code in the call stack.
-void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory,
-                                     bool use_delayed) {
-  scoped_ptr<MessagePump> pump(factory());
+void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory) {
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
 
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE,
       Bind(&FuncThatPumps, &order, 1));
-  if (use_delayed) {
-    MessageLoop::current()->PostNonNestableDelayedTask(
-        FROM_HERE,
-        Bind(&OrderedFunc, &order, 2),
-        TimeDelta::FromMilliseconds(1));
-  } else {
-    MessageLoop::current()->PostNonNestableTask(
-        FROM_HERE,
-        Bind(&OrderedFunc, &order, 2));
-  }
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&OrderedFunc, &order, 3));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE,
       Bind(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&OrderedFunc, &order, 5));
-  if (use_delayed) {
-    MessageLoop::current()->PostNonNestableDelayedTask(
-        FROM_HERE,
-        Bind(&QuitFunc, &order, 6),
-        TimeDelta::FromMilliseconds(2));
-  } else {
-    MessageLoop::current()->PostNonNestableTask(
-        FROM_HERE,
-        Bind(&QuitFunc, &order, 6));
-  }
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 5));
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
+      FROM_HERE,
+      Bind(&QuitFunc, &order, 6));
 
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 
   // FIFO order.
   ASSERT_EQ(12U, order.Size());
@@ -707,27 +743,27 @@
 }
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_QuitNow(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
 
   RunLoop run_loop;
 
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 2));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&FuncThatQuitsNow));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&FuncThatQuitsNow));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 3));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&FuncThatQuitsNow));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&OrderedFunc, &order, 4)); // never runs
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&FuncThatQuitsNow));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 4));  // never runs
 
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 
   ASSERT_EQ(6U, order.Size());
   int task_index = 0;
@@ -742,7 +778,7 @@
 
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_RunLoopQuitTop(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -750,13 +786,13 @@
   RunLoop outer_run_loop;
   RunLoop nested_run_loop;
 
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, outer_run_loop.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  outer_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 2));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, nested_run_loop.QuitClosure());
 
   outer_run_loop.Run();
@@ -772,7 +808,7 @@
 
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_RunLoopQuitNested(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -780,14 +816,14 @@
   RunLoop outer_run_loop;
   RunLoop nested_run_loop;
 
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, nested_run_loop.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 2));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, outer_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  outer_run_loop.QuitClosure());
 
   outer_run_loop.Run();
 
@@ -802,7 +838,7 @@
 
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_RunLoopQuitBogus(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -811,15 +847,15 @@
   RunLoop nested_run_loop;
   RunLoop bogus_run_loop;
 
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, bogus_run_loop.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  bogus_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 2));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, outer_run_loop.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  outer_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, nested_run_loop.QuitClosure());
 
   outer_run_loop.Run();
@@ -835,7 +871,7 @@
 
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_RunLoopQuitDeep(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -846,35 +882,35 @@
   RunLoop nested_loop3;
   RunLoop nested_loop4;
 
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 5));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, outer_run_loop.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  outer_run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 6));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, nested_loop1.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop1.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 7));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, nested_loop2.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop2.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 8));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, nested_loop3.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop3.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 9));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, nested_loop4.QuitClosure());
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  nested_loop4.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 10));
 
   outer_run_loop.Run();
@@ -904,7 +940,7 @@
 
 // Tests RunLoopQuit works before RunWithID.
 void RunTest_RunLoopQuitOrderBefore(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -913,10 +949,10 @@
 
   run_loop.Quit();
 
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&OrderedFunc, &order, 1)); // never runs
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 1));  // never runs
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatQuitsNow));  // never runs
 
   run_loop.Run();
 
@@ -925,21 +961,21 @@
 
 // Tests RunLoopQuit works during RunWithID.
 void RunTest_RunLoopQuitOrderDuring(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
 
   RunLoop run_loop;
 
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 1));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, run_loop.QuitClosure());
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&OrderedFunc, &order, 2)); // never runs
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&FuncThatQuitsNow)); // never runs
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  run_loop.QuitClosure());
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&OrderedFunc, &order, 2));  // never runs
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatQuitsNow));  // never runs
 
   run_loop.Run();
 
@@ -952,27 +988,27 @@
 
 // Tests RunLoopQuit works after RunWithID.
 void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
 
   RunLoop run_loop;
 
-  MessageLoop::current()->PostTask(FROM_HERE,
-      Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, Bind(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 2));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&FuncThatQuitsNow));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&FuncThatQuitsNow));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 3));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, run_loop.QuitClosure()); // has no affect
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE, run_loop.QuitClosure());  // has no effect
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE, Bind(&OrderedFunc, &order, 4));
-  MessageLoop::current()->PostTask(
-      FROM_HERE, Bind(&FuncThatQuitsNow));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&FuncThatQuitsNow));
 
   RunLoop outer_run_loop;
   outer_run_loop.Run();
@@ -992,9 +1028,8 @@
 
 void PostNTasksThenQuit(int posts_remaining) {
   if (posts_remaining > 1) {
-    MessageLoop::current()->PostTask(
-        FROM_HERE,
-        Bind(&PostNTasksThenQuit, posts_remaining - 1));
+    MessageLoop::current()->task_runner()->PostTask(
+        FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
   } else {
     MessageLoop::current()->QuitWhenIdle();
   }
@@ -1010,10 +1045,10 @@
 // times to reproduce the bug.
 void RunTest_RecursivePosts(MessagePumpFactory factory) {
   const int kNumTimes = 1 << 17;
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
-  loop.PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
-  loop.Run();
+  loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
+  RunLoop().Run();
 }
 
 }  // namespace test
diff --git a/base/message_loop/message_loop_test.h b/base/message_loop/message_loop_test.h
index 3d9889c..b7ae28e 100644
--- a/base/message_loop/message_loop_test.h
+++ b/base/message_loop/message_loop_test.h
@@ -28,12 +28,12 @@
 void RunTest_EnsureDeletion(MessagePumpFactory factory);
 void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory);
 void RunTest_Nesting(MessagePumpFactory factory);
+void RunTest_NestingObserver(MessagePumpFactory factory);
 void RunTest_RecursiveDenial1(MessagePumpFactory factory);
 void RunTest_RecursiveDenial3(MessagePumpFactory factory);
 void RunTest_RecursiveSupport1(MessagePumpFactory factory);
 void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory);
-void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory,
-                                     bool use_delayed);
+void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory);
 void RunTest_QuitNow(MessagePumpFactory factory);
 void RunTest_RunLoopQuitTop(MessagePumpFactory factory);
 void RunTest_RunLoopQuitNested(MessagePumpFactory factory);
@@ -96,11 +96,8 @@
   TEST(MessageLoopTestType##id, NonNestableWithNoNesting) { \
     base::test::RunTest_NonNestableWithNoNesting(factory); \
   } \
-  TEST(MessageLoopTestType##id, NonNestableInNestedLoop) { \
-    base::test::RunTest_NonNestableInNestedLoop(factory, false); \
-  } \
   TEST(MessageLoopTestType##id, NonNestableDelayedInNestedLoop) { \
-    base::test::RunTest_NonNestableInNestedLoop(factory, true); \
+    base::test::RunTest_NonNestableInNestedLoop(factory); \
   } \
   TEST(MessageLoopTestType##id, QuitNow) { \
     base::test::RunTest_QuitNow(factory); \
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index 1a3a925..52337e3 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -18,19 +18,20 @@
 #include "base/pending_task.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/test/test_simple_task_runner.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 #if defined(OS_WIN)
-#include "base/message_loop/message_pump_dispatcher.h"
 #include "base/message_loop/message_pump_win.h"
 #include "base/process/memory.h"
 #include "base/strings/string16.h"
+#include "base/win/current_module.h"
 #include "base/win/scoped_handle.h"
 #endif
 
@@ -41,15 +42,15 @@
 
 namespace {
 
-scoped_ptr<MessagePump> TypeDefaultMessagePumpFactory() {
+std::unique_ptr<MessagePump> TypeDefaultMessagePumpFactory() {
   return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_DEFAULT);
 }
 
-scoped_ptr<MessagePump> TypeIOMessagePumpFactory() {
+std::unique_ptr<MessagePump> TypeIOMessagePumpFactory() {
   return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_IO);
 }
 
-scoped_ptr<MessagePump> TypeUIMessagePumpFactory() {
+std::unique_ptr<MessagePump> TypeUIMessagePumpFactory() {
   return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_UI);
 }
 
@@ -416,9 +417,8 @@
 
 void PostNTasksThenQuit(int posts_remaining) {
   if (posts_remaining > 1) {
-    MessageLoop::current()->PostTask(
-        FROM_HERE,
-        Bind(&PostNTasksThenQuit, posts_remaining - 1));
+    MessageLoop::current()->task_runner()->PostTask(
+        FROM_HERE, Bind(&PostNTasksThenQuit, posts_remaining - 1));
   } else {
     MessageLoop::current()->QuitWhenIdle();
   }
@@ -426,70 +426,6 @@
 
 #if defined(OS_WIN)
 
-class DispatcherImpl : public MessagePumpDispatcher {
- public:
-  DispatcherImpl() : dispatch_count_(0) {}
-
-  uint32_t Dispatch(const NativeEvent& msg) override {
-    ::TranslateMessage(&msg);
-    ::DispatchMessage(&msg);
-    // Do not count WM_TIMER since it is not what we post and it will cause
-    // flakiness.
-    if (msg.message != WM_TIMER)
-      ++dispatch_count_;
-    // We treat WM_LBUTTONUP as the last message.
-    return msg.message == WM_LBUTTONUP ? POST_DISPATCH_QUIT_LOOP
-                                       : POST_DISPATCH_NONE;
-  }
-
-  int dispatch_count_;
-};
-
-void MouseDownUp() {
-  PostMessage(NULL, WM_LBUTTONDOWN, 0, 0);
-  PostMessage(NULL, WM_LBUTTONUP, 'A', 0);
-}
-
-void RunTest_Dispatcher(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
-
-  MessageLoop::current()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&MouseDownUp),
-      TimeDelta::FromMilliseconds(100));
-  DispatcherImpl dispatcher;
-  RunLoop run_loop(&dispatcher);
-  run_loop.Run();
-  ASSERT_EQ(2, dispatcher.dispatch_count_);
-}
-
-LRESULT CALLBACK MsgFilterProc(int code, WPARAM wparam, LPARAM lparam) {
-  if (code == MessagePumpForUI::kMessageFilterCode) {
-    MSG* msg = reinterpret_cast<MSG*>(lparam);
-    if (msg->message == WM_LBUTTONDOWN)
-      return TRUE;
-  }
-  return FALSE;
-}
-
-void RunTest_DispatcherWithMessageHook(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
-
-  MessageLoop::current()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&MouseDownUp),
-      TimeDelta::FromMilliseconds(100));
-  HHOOK msg_hook = SetWindowsHookEx(WH_MSGFILTER,
-                                    MsgFilterProc,
-                                    NULL,
-                                    GetCurrentThreadId());
-  DispatcherImpl dispatcher;
-  RunLoop run_loop(&dispatcher);
-  run_loop.Run();
-  ASSERT_EQ(1, dispatcher.dispatch_count_);
-  UnhookWindowsHookEx(msg_hook);
-}
-
 class TestIOHandler : public MessageLoopForIO::IOHandler {
  public:
   TestIOHandler(const wchar_t* name, HANDLE signal, bool wait);
@@ -514,8 +450,6 @@
 TestIOHandler::TestIOHandler(const wchar_t* name, HANDLE signal, bool wait)
     : signal_(signal), wait_(wait) {
   memset(buffer_, 0, sizeof(buffer_));
-  memset(&context_, 0, sizeof(context_));
-  context_.handler = this;
 
   file_.Set(CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING,
                        FILE_FLAG_OVERLAPPED, NULL));
@@ -647,6 +581,9 @@
 RUN_MESSAGE_LOOP_TESTS(IO, &TypeIOMessagePumpFactory);
 
 #if defined(OS_WIN)
+// Additional set of tests for GPU version of UI message loop.
+RUN_MESSAGE_LOOP_TESTS(GPU, &MessagePumpForGpu::CreateMessagePumpForGpu);
+
 TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
   RunTest_PostDelayedTask_SharedTimer_SubPump();
 }
@@ -702,8 +639,8 @@
 
   MessageLoop loop;
   loop.AddTaskObserver(&observer);
-  loop.PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumPosts));
-  loop.Run();
+  loop.task_runner()->PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumPosts));
+  RunLoop().Run();
   loop.RemoveTaskObserver(&observer);
 
   EXPECT_EQ(kNumPosts, observer.num_tasks_started());
@@ -711,16 +648,6 @@
 }
 
 #if defined(OS_WIN)
-TEST(MessageLoopTest, Dispatcher) {
-  // This test requires a UI loop
-  RunTest_Dispatcher(MessageLoop::TYPE_UI);
-}
-
-TEST(MessageLoopTest, DispatcherWithMessageHook) {
-  // This test requires a UI loop
-  RunTest_DispatcherWithMessageHook(MessageLoop::TYPE_UI);
-}
-
 TEST(MessageLoopTest, IOHandler) {
   RunTest_IOHandler();
 }
@@ -888,11 +815,10 @@
 
   MLDestructionObserver observer(&task_destroyed, &destruction_observer_called);
   loop->AddDestructionObserver(&observer);
-  loop->PostDelayedTask(
-      FROM_HERE,
-      Bind(&DestructionObserverProbe::Run,
-                 new DestructionObserverProbe(&task_destroyed,
-                                              &destruction_observer_called)),
+  loop->task_runner()->PostDelayedTask(
+      FROM_HERE, Bind(&DestructionObserverProbe::Run,
+                      new DestructionObserverProbe(
+                          &task_destroyed, &destruction_observer_called)),
       kDelay);
   delete loop;
   EXPECT_TRUE(observer.task_destroyed_before_message_loop());
@@ -913,12 +839,12 @@
       &Foo::Test1ConstRef, foo.get(), a));
 
   // Post quit task;
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE,
       Bind(&MessageLoop::QuitWhenIdle, Unretained(MessageLoop::current())));
 
   // Now kick things off
-  MessageLoop::current()->Run();
+  RunLoop().Run();
 
   EXPECT_EQ(foo->test_count(), 1);
   EXPECT_EQ(foo->result(), "a");
@@ -998,7 +924,7 @@
 
 TEST(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
   MessageLoop loop(MessageLoop::TYPE_UI);
-  HINSTANCE instance = GetModuleFromAddress(&TestWndProcThunk);
+  HINSTANCE instance = CURRENT_MODULE();
   WNDCLASSEX wc = {0};
   wc.cbSize = sizeof(wc);
   wc.lpfnWndProc = TestWndProcThunk;
@@ -1037,7 +963,7 @@
   scoped_refptr<Foo> foo(new Foo());
   original_runner->PostTask(FROM_HERE,
                             Bind(&Foo::Test1ConstRef, foo.get(), "a"));
-  loop.RunUntilIdle();
+  RunLoop().RunUntilIdle();
   EXPECT_EQ(1, foo->test_count());
 }
 
@@ -1045,11 +971,27 @@
   // It should be possible to delete an unbound message loop on a thread which
   // already has another active loop. This happens when thread creation fails.
   MessageLoop loop;
-  scoped_ptr<MessageLoop> unbound_loop(MessageLoop::CreateUnbound(
+  std::unique_ptr<MessageLoop> unbound_loop(MessageLoop::CreateUnbound(
       MessageLoop::TYPE_DEFAULT, MessageLoop::MessagePumpFactoryCallback()));
   unbound_loop.reset();
   EXPECT_EQ(&loop, MessageLoop::current());
   EXPECT_EQ(loop.task_runner(), ThreadTaskRunnerHandle::Get());
 }
 
+TEST(MessageLoopTest, ThreadName) {
+  {
+    std::string kThreadName("foo");
+    MessageLoop loop;
+    PlatformThread::SetName(kThreadName);
+    EXPECT_EQ(kThreadName, loop.GetThreadName());
+  }
+
+  {
+    std::string kThreadName("bar");
+    base::Thread thread(kThreadName);
+    ASSERT_TRUE(thread.StartAndWaitForTesting());
+    EXPECT_EQ(kThreadName, thread.message_loop()->GetThreadName());
+  }
+}
+
 }  // namespace base
diff --git a/base/message_loop/message_pump.cc b/base/message_loop/message_pump.cc
index 3d85b9b..2f740f2 100644
--- a/base/message_loop/message_pump.cc
+++ b/base/message_loop/message_pump.cc
@@ -15,4 +15,11 @@
 void MessagePump::SetTimerSlack(TimerSlack) {
 }
 
+#if defined(OS_WIN)
+bool MessagePump::WasSignaled() {
+  NOTREACHED();
+  return false;
+}
+#endif
+
 }  // namespace base
diff --git a/base/message_loop/message_pump.h b/base/message_loop/message_pump.h
index c53be80..af8ed41 100644
--- a/base/message_loop/message_pump.h
+++ b/base/message_loop/message_pump.h
@@ -124,6 +124,15 @@
 
   // Sets the timer slack to the specified value.
   virtual void SetTimerSlack(TimerSlack timer_slack);
+
+#if defined(OS_WIN)
+  // TODO(stanisc): crbug.com/596190: Remove this after the signaling issue
+  // has been investigated.
+  // This should be used for diagnostics only. If the message pump wake-up
+  // mechanism is based on an auto-reset event, this call resets the event to
+  // the unset state.
+  virtual bool WasSignaled();
+#endif
 };
 
 }  // namespace base
diff --git a/base/message_loop/message_pump_default.cc b/base/message_loop/message_pump_default.cc
index ed15395..3449aec 100644
--- a/base/message_loop/message_pump_default.cc
+++ b/base/message_loop/message_pump_default.cc
@@ -4,6 +4,8 @@
 
 #include "base/message_loop/message_pump_default.h"
 
+#include <algorithm>
+
 #include "base/logging.h"
 #include "base/threading/thread_restrictions.h"
 #include "build/build_config.h"
@@ -16,8 +18,8 @@
 
 MessagePumpDefault::MessagePumpDefault()
     : keep_running_(true),
-      event_(false, false) {
-}
+      event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+             WaitableEvent::InitialState::NOT_SIGNALED) {}
 
 MessagePumpDefault::~MessagePumpDefault() {
 }
@@ -54,7 +56,31 @@
     } else {
       TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
       if (delay > TimeDelta()) {
+#if defined(OS_WIN)
+        // TODO(stanisc): crbug.com/623223: Consider moving the OS_WIN specific
+        // logic into TimedWait implementation in waitable_event_win.cc.
+
+        // crbug.com/487724: on Windows, waiting for less than 1 ms results in
+        // returning from TimedWait promptly and spinning
+        // MessagePumpDefault::Run loop for up to 1 ms - until it is time to
+        // run a delayed task. |min_delay| is the minimum possible wait to
+        // avoid the spinning.
+        constexpr TimeDelta min_delay = TimeDelta::FromMilliseconds(1);
+        do {
+          delay = std::max(delay, min_delay);
+          if (event_.TimedWait(delay))
+            break;
+
+          // TimedWait can time out earlier than the specified |delay| on
+          // Windows. It doesn't make sense to run the outer loop in that case
+          // because there isn't going to be any new work. It is less overhead
+          // to just go back to wait.
+          // In practice this inner wait loop might have up to 3 iterations.
+          delay = delayed_work_time_ - TimeTicks::Now();
+        } while (delay > TimeDelta());
+#else
         event_.TimedWait(delay);
+#endif
       } else {
         // It looks like delayed_work_time_ indicates a time in the past, so we
         // need to call DoDelayedWork now.
diff --git a/base/message_loop/message_pump_dispatcher.h b/base/message_loop/message_pump_dispatcher.h
deleted file mode 100644
index 5b1bd55..0000000
--- a/base/message_loop/message_pump_dispatcher.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_DISPATCHER_H_
-#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_DISPATCHER_H_
-
-#include <stdint.h>
-
-#include "base/base_export.h"
-#include "base/event_types.h"
-
-namespace base {
-
-// Dispatcher is used during a nested invocation of Run to dispatch events when
-// |RunLoop(dispatcher).Run()| is used.  If |RunLoop().Run()| is invoked,
-// MessageLoop does not dispatch events (or invoke TranslateMessage), rather
-// every message is passed to Dispatcher's Dispatch method for dispatch. It is
-// up to the Dispatcher whether or not to dispatch the event.
-//
-// The nested loop is exited by either posting a quit, or setting the
-// POST_DISPATCH_QUIT_LOOP flag on the return value from Dispatch.
-class BASE_EXPORT MessagePumpDispatcher {
- public:
-  enum PostDispatchAction {
-    POST_DISPATCH_NONE = 0x0,
-    POST_DISPATCH_QUIT_LOOP = 0x1,
-    POST_DISPATCH_PERFORM_DEFAULT = 0x2,
-  };
-
-  virtual ~MessagePumpDispatcher() {}
-
-  // Dispatches the event. The return value can have more than one
-  // PostDispatchAction flags OR'ed together. If POST_DISPATCH_PERFORM_DEFAULT
-  // is set in the returned value, then the message-pump performs the default
-  // action. If POST_DISPATCH_QUIT_LOOP is set, in the return value, then the
-  // nested loop exits immediately.
-  virtual uint32_t Dispatch(const NativeEvent& event) = 0;
-};
-
-}  // namespace base
-
-#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_DISPATCHER_H_
diff --git a/base/message_loop/message_pump_glib.cc b/base/message_loop/message_pump_glib.cc
index f06f60d..fd23745 100644
--- a/base/message_loop/message_pump_glib.cc
+++ b/base/message_loop/message_pump_glib.cc
@@ -52,7 +52,7 @@
 // returns FALSE, GLib will destroy the source.  Dispatch calls may be recursive
 // (i.e., you can call Run from them), but Prepare and Check cannot.
 //     Finalize is called when the source is destroyed.
-// NOTE: It is common for subsytems to want to process pending events while
+// NOTE: It is common for subsystems to want to process pending events while
 // doing intensive work, for example the flash plugin. They usually use the
 // following pattern (recommended by the GTK docs):
 // while (gtk_events_pending()) {
@@ -350,7 +350,7 @@
 
 void MessagePumpGlib::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
   // We need to wake up the loop in case the poll timeout needs to be
-  // adjusted.  This will cause us to try to do work, but that's ok.
+  // adjusted.  This will cause us to try to do work, but that's OK.
   delayed_work_time_ = delayed_work_time;
   ScheduleWork();
 }
diff --git a/base/message_loop/message_pump_glib.h b/base/message_loop/message_pump_glib.h
index 9f44571..a2b54d8 100644
--- a/base/message_loop/message_pump_glib.h
+++ b/base/message_loop/message_pump_glib.h
@@ -5,8 +5,10 @@
 #ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
 #define BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
 
+#include <memory>
+
 #include "base/base_export.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/macros.h"
 #include "base/message_loop/message_pump.h"
 #include "base/observer_list.h"
 #include "base/time/time.h"
@@ -68,7 +70,7 @@
   int wakeup_pipe_read_;
   int wakeup_pipe_write_;
   // Use a scoped_ptr to avoid needing the definition of GPollFD in the header.
-  scoped_ptr<GPollFD> wakeup_gpollfd_;
+  std::unique_ptr<GPollFD> wakeup_gpollfd_;
 
   DISALLOW_COPY_AND_ASSIGN(MessagePumpGlib);
 };
diff --git a/base/message_loop/message_pump_libevent.cc b/base/message_loop/message_pump_libevent.cc
index 72726a8..5aa5567 100644
--- a/base/message_loop/message_pump_libevent.cc
+++ b/base/message_loop/message_pump_libevent.cc
@@ -7,25 +7,19 @@
 #include <errno.h>
 #include <unistd.h>
 
+#include <memory>
+
 #include "base/auto_reset.h"
 #include "base/compiler_specific.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/observer_list.h"
 #include "base/posix/eintr_wrapper.h"
+#include "base/third_party/libevent/event.h"
 #include "base/time/time.h"
 #include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
 
-#if defined(__ANDROID__) || defined(__ANDROID_HOST__)
-#include <event2/event.h>
-#include <event2/event_compat.h>
-#include <event2/event_struct.h>
-#else
-#include "third_party/libevent/event.h"
-#endif
-
 #if defined(OS_MACOSX)
 #include "base/mac/scoped_nsautorelease_pool.h"
 #endif
@@ -94,22 +88,20 @@
 }
 
 void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanReadWithoutBlocking(
-    int fd, MessagePumpLibevent* pump) {
+    int fd,
+    MessagePumpLibevent*) {
   // Since OnFileCanWriteWithoutBlocking() gets called first, it can stop
   // watching the file descriptor.
   if (!watcher_)
     return;
-  pump->WillProcessIOEvent();
   watcher_->OnFileCanReadWithoutBlocking(fd);
-  pump->DidProcessIOEvent();
 }
 
 void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanWriteWithoutBlocking(
-    int fd, MessagePumpLibevent* pump) {
+    int fd,
+    MessagePumpLibevent*) {
   DCHECK(watcher_);
-  pump->WillProcessIOEvent();
   watcher_->OnFileCanWriteWithoutBlocking(fd);
-  pump->DidProcessIOEvent();
 }
 
 MessagePumpLibevent::MessagePumpLibevent()
@@ -160,7 +152,7 @@
     event_mask |= EV_WRITE;
   }
 
-  scoped_ptr<event> evt(controller->ReleaseEvent());
+  std::unique_ptr<event> evt(controller->ReleaseEvent());
   if (evt.get() == NULL) {
     // Ownership is transferred to the controller.
     evt.reset(new event);
@@ -205,17 +197,8 @@
   return true;
 }
 
-void MessagePumpLibevent::AddIOObserver(IOObserver *obs) {
-  io_observers_.AddObserver(obs);
-}
-
-void MessagePumpLibevent::RemoveIOObserver(IOObserver *obs) {
-  io_observers_.RemoveObserver(obs);
-}
-
 // Tell libevent to break out of inner loop.
-static void timer_callback(int /* fd */, short /* events */, void *context)
-{
+static void timer_callback(int /*fd*/, short /*events*/, void* context) {
   event_base_loopbreak((struct event_base *)context);
 }
 
@@ -226,7 +209,7 @@
 
   // event_base_loopexit() + EVLOOP_ONCE is leaky, see http://crbug.com/25641.
   // Instead, make our own timer and reuse it on each call to event_base_loop().
-  scoped_ptr<event> timer_event(new event);
+  std::unique_ptr<event> timer_event(new event);
 
   for (;;) {
 #if defined(OS_MACOSX)
@@ -307,14 +290,6 @@
   delayed_work_time_ = delayed_work_time;
 }
 
-void MessagePumpLibevent::WillProcessIOEvent() {
-  FOR_EACH_OBSERVER(IOObserver, io_observers_, WillProcessIOEvent());
-}
-
-void MessagePumpLibevent::DidProcessIOEvent() {
-  FOR_EACH_OBSERVER(IOObserver, io_observers_, DidProcessIOEvent());
-}
-
 bool MessagePumpLibevent::Init() {
   int fds[2];
   if (pipe(fds)) {
@@ -374,8 +349,7 @@
 
 // Called if a byte is received on the wakeup pipe.
 // static
-void MessagePumpLibevent::OnWakeup(int socket, short /* flags */,
-                                   void* context) {
+void MessagePumpLibevent::OnWakeup(int socket, short /*flags*/, void* context) {
   MessagePumpLibevent* that = static_cast<MessagePumpLibevent*>(context);
   DCHECK(that->wakeup_pipe_out_ == socket);
 
diff --git a/base/message_loop/message_pump_libevent.h b/base/message_loop/message_pump_libevent.h
index 4d2f4f7..76f882f 100644
--- a/base/message_loop/message_pump_libevent.h
+++ b/base/message_loop/message_pump_libevent.h
@@ -8,7 +8,6 @@
 #include "base/compiler_specific.h"
 #include "base/macros.h"
 #include "base/message_loop/message_pump.h"
-#include "base/observer_list.h"
 #include "base/threading/thread_checker.h"
 #include "base/time/time.h"
 
@@ -22,21 +21,6 @@
 // TODO(dkegel): add support for background file IO somehow
 class BASE_EXPORT MessagePumpLibevent : public MessagePump {
  public:
-  class IOObserver {
-   public:
-    IOObserver() {}
-
-    // An IOObserver is an object that receives IO notifications from the
-    // MessagePump.
-    //
-    // NOTE: An IOObserver implementation should be extremely fast!
-    virtual void WillProcessIOEvent() = 0;
-    virtual void DidProcessIOEvent() = 0;
-
-   protected:
-    virtual ~IOObserver() {}
-  };
-
   // Used with WatchFileDescriptor to asynchronously monitor the I/O readiness
   // of a file descriptor.
   class Watcher {
@@ -119,9 +103,6 @@
                            FileDescriptorWatcher *controller,
                            Watcher *delegate);
 
-  void AddIOObserver(IOObserver* obs);
-  void RemoveIOObserver(IOObserver* obs);
-
   // MessagePump methods:
   void Run(Delegate* delegate) override;
   void Quit() override;
@@ -168,7 +149,6 @@
   // ... libevent wrapper for read end
   event* wakeup_event_;
 
-  ObserverList<IOObserver> io_observers_;
   ThreadChecker watch_file_descriptor_caller_checker_;
   DISALLOW_COPY_AND_ASSIGN(MessagePumpLibevent);
 };
diff --git a/base/metrics/OWNERS b/base/metrics/OWNERS
index 3fd7c0d..feb8271 100644
--- a/base/metrics/OWNERS
+++ b/base/metrics/OWNERS
@@ -1,3 +1,2 @@
 asvitkine@chromium.org
 isherman@chromium.org
-jar@chromium.org
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index b417b05..600b94e 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -9,6 +9,7 @@
 #include "base/build_time.h"
 #include "base/logging.h"
 #include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/strings/utf_string_conversions.h"
@@ -43,8 +44,14 @@
   exploded.minute = 0;
   exploded.second = 0;
   exploded.millisecond = 0;
+  Time out_time;
+  if (!Time::FromLocalExploded(exploded, &out_time)) {
+    // TODO(maksims): implement failure handling.
+    // We might just return |out_time|, which is Time(0).
+    NOTIMPLEMENTED();
+  }
 
-  return Time::FromLocalExploded(exploded);
+  return out_time;
 }
 
 // Returns the boundary value for comparing against the FieldTrial's added
@@ -122,6 +129,8 @@
 
 FieldTrial::State::State() : activated(false) {}
 
+FieldTrial::State::State(const State& other) = default;
+
 FieldTrial::State::~State() {}
 
 void FieldTrial::Disable() {
@@ -335,8 +344,8 @@
     FieldTrial::RandomizationType randomization_type,
     int* default_group_number) {
   return FactoryGetFieldTrialWithRandomizationSeed(
-      trial_name, total_probability, default_group_name,
-      year, month, day_of_month, randomization_type, 0, default_group_number);
+      trial_name, total_probability, default_group_name, year, month,
+      day_of_month, randomization_type, 0, default_group_number, NULL);
 }
 
 // static
@@ -349,7 +358,8 @@
     const int day_of_month,
     FieldTrial::RandomizationType randomization_type,
     uint32_t randomization_seed,
-    int* default_group_number) {
+    int* default_group_number,
+    const FieldTrial::EntropyProvider* override_entropy_provider) {
   if (default_group_number)
     *default_group_number = FieldTrial::kDefaultGroupNumber;
   // Check if the field trial has already been created in some other way.
@@ -383,8 +393,10 @@
 
   double entropy_value;
   if (randomization_type == FieldTrial::ONE_TIME_RANDOMIZED) {
+    // If an override entropy provider is given, use it.
     const FieldTrial::EntropyProvider* entropy_provider =
-        GetEntropyProviderForOneTimeRandomization();
+        override_entropy_provider ? override_entropy_provider
+                                  : GetEntropyProviderForOneTimeRandomization();
     CHECK(entropy_provider);
     entropy_value = entropy_provider->GetEntropyForTrial(trial_name,
                                                          randomization_seed);
@@ -515,7 +527,6 @@
 // static
 bool FieldTrialList::CreateTrialsFromString(
     const std::string& trials_string,
-    FieldTrialActivationMode mode,
     const std::set<std::string>& ignored_trial_names) {
   DCHECK(global_);
   if (trials_string.empty() || !global_)
@@ -535,7 +546,7 @@
     FieldTrial* trial = CreateFieldTrial(trial_name, group_name);
     if (!trial)
       return false;
-    if (mode == ACTIVATE_TRIALS || entry.activated) {
+    if (entry.activated) {
       // Call |group()| to mark the trial as "used" and notify observers, if
       // any. This is useful to ensure that field trials created in child
       // processes are properly reported in crash reports.
@@ -638,7 +649,7 @@
     return;
   }
   AutoLock auto_lock(global_->lock_);
-  DCHECK(!global_->PreLockedFind(trial->trial_name()));
+  CHECK(!global_->PreLockedFind(trial->trial_name())) << trial->trial_name();
   trial->AddRef();
   trial->SetTrialRegistered();
   global_->registered_[trial->trial_name()] = trial;
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
index 7bfc1de..28a4606 100644
--- a/base/metrics/field_trial.h
+++ b/base/metrics/field_trial.h
@@ -119,6 +119,7 @@
     bool activated;
 
     State();
+    State(const State& other);
     ~State();
   };
 
@@ -320,14 +321,6 @@
 // Only one instance of this class exists.
 class BASE_EXPORT FieldTrialList {
  public:
-  // Specifies whether field trials should be activated (marked as "used"), when
-  // created using |CreateTrialsFromString()|. Has no effect on trials that are
-  // prefixed with |kActivationMarker|, which will always be activated."
-  enum FieldTrialActivationMode {
-    DONT_ACTIVATE_TRIALS,
-    ACTIVATE_TRIALS,
-  };
-
   // Year that is guaranteed to not be expired when instantiating a field trial
   // via |FactoryGetFieldTrial()|.  Set to two years from the build date.
   static int kNoExpirationYear;
@@ -386,9 +379,12 @@
   // used on one-time randomized field trials (instead of a hash of the trial
   // name, which is used otherwise or if |randomization_seed| has value 0). The
   // |randomization_seed| value (other than 0) should never be the same for two
-  // trials, else this would result in correlated group assignments.
-  // Note: Using a custom randomization seed is only supported by the
-  // PermutedEntropyProvider (which is used when UMA is not enabled).
+  // trials, else this would result in correlated group assignments.  Note:
+  // Using a custom randomization seed is only supported by the
+  // PermutedEntropyProvider (which is used when UMA is not enabled). If
+  // |override_entropy_provider| is not null, then it will be used for
+  // randomization instead of the provider given when the FieldTrialList was
+  // instantiated.
   static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
       const std::string& trial_name,
       FieldTrial::Probability total_probability,
@@ -398,7 +394,8 @@
       const int day_of_month,
       FieldTrial::RandomizationType randomization_type,
       uint32_t randomization_seed,
-      int* default_group_number);
+      int* default_group_number,
+      const FieldTrial::EntropyProvider* override_entropy_provider);
 
   // The Find() method can be used to test to see if a named trial was already
   // registered, or to retrieve a pointer to it from the global map.
@@ -457,14 +454,12 @@
   // for each trial, force them to have the same group string. This is commonly
   // used in a non-browser process, to carry randomly selected state in a
   // browser process into this non-browser process, but could also be invoked
-  // through a command line argument to the browser process. The created field
-  // trials are all marked as "used" for the purposes of active trial reporting
-  // if |mode| is ACTIVATE_TRIALS, otherwise each trial will be marked as "used"
-  // if it is prefixed with |kActivationMarker|. Trial names in
+  // through a command line argument to the browser process. Created field
+  // trials will be marked "used" for the purposes of active trial reporting
+  // if they are prefixed with |kActivationMarker|. Trial names in
   // |ignored_trial_names| are ignored when parsing |trials_string|.
   static bool CreateTrialsFromString(
       const std::string& trials_string,
-      FieldTrialActivationMode mode,
       const std::set<std::string>& ignored_trial_names);
 
   // Create a FieldTrial with the given |name| and using 100% probability for
@@ -519,9 +514,11 @@
   base::Lock lock_;
   RegistrationMap registered_;
 
+  std::map<std::string, std::string> seen_states_;
+
   // Entropy provider to be used for one-time randomized field trials. If NULL,
   // one-time randomization is not supported.
-  scoped_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
+  std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
 
   // List of observers to be notified when a group is selected for a FieldTrial.
   scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc
index 555d7fa..00f351f 100644
--- a/base/metrics/field_trial_unittest.cc
+++ b/base/metrics/field_trial_unittest.cc
@@ -504,7 +504,6 @@
   ASSERT_FALSE(FieldTrialList::TrialExists("xxx"));
 
   FieldTrialList::CreateTrialsFromString("Some_name/Winner/xxx/yyyy/",
-                                         FieldTrialList::DONT_ACTIVATE_TRIALS,
                                          std::set<std::string>());
 
   FieldTrial* trial = FieldTrialList::Find("Some_name");
@@ -519,9 +518,8 @@
 }
 
 TEST_F(FieldTrialTest, RestoreNotEndingWithSlash) {
-  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "tname/gname", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString("tname/gname",
+                                                     std::set<std::string>()));
 
   FieldTrial* trial = FieldTrialList::Find("tname");
   ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
@@ -530,21 +528,16 @@
 }
 
 TEST_F(FieldTrialTest, BogusRestore) {
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "MissingSlash", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "MissingGroupName/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "noname, only group/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "/emptyname", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "*/emptyname", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingSlash",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingGroupName/",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("noname, only group/",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("/emptyname",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("*/emptyname",
+                                                      std::set<std::string>()));
 }
 
 TEST_F(FieldTrialTest, DuplicateRestore) {
@@ -558,38 +551,19 @@
   EXPECT_EQ("Some name/Winner/", save_string);
 
   // It is OK if we redundantly specify a winner.
-  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(
-      save_string, FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(save_string,
+                                                     std::set<std::string>()));
 
   // But it is an error to try to change to a different winner.
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "Some name/Loser/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-}
-
-TEST_F(FieldTrialTest, CreateTrialsFromStringActive) {
-  ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
-  ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "Abc/def/Xyz/zyx/", FieldTrialList::ACTIVATE_TRIALS,
-      std::set<std::string>()));
-
-  FieldTrial::ActiveGroups active_groups;
-  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
-  ASSERT_EQ(2U, active_groups.size());
-  EXPECT_EQ("Abc", active_groups[0].trial_name);
-  EXPECT_EQ("def", active_groups[0].group_name);
-  EXPECT_EQ("Xyz", active_groups[1].trial_name);
-  EXPECT_EQ("zyx", active_groups[1].group_name);
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("Some name/Loser/",
+                                                      std::set<std::string>()));
 }
 
 TEST_F(FieldTrialTest, CreateTrialsFromStringNotActive) {
   ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
   ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "Abc/def/Xyz/zyx/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/Xyz/zyx/",
+                                                     std::set<std::string>()));
 
   FieldTrial::ActiveGroups active_groups;
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -612,8 +586,7 @@
   ASSERT_FALSE(FieldTrialList::TrialExists("def"));
   ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
   ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "*Abc/cba/def/fed/*Xyz/zyx/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+      "*Abc/cba/def/fed/*Xyz/zyx/", std::set<std::string>()));
 
   FieldTrial::ActiveGroups active_groups;
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -624,25 +597,12 @@
   EXPECT_EQ("zyx", active_groups[1].group_name);
 }
 
-TEST_F(FieldTrialTest, CreateTrialsFromStringActiveObserver) {
-  ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
-
-  TestFieldTrialObserver observer;
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "Abc/def/", FieldTrialList::ACTIVATE_TRIALS, std::set<std::string>()));
-
-  RunLoop().RunUntilIdle();
-  EXPECT_EQ("Abc", observer.trial_name());
-  EXPECT_EQ("def", observer.group_name());
-}
-
 TEST_F(FieldTrialTest, CreateTrialsFromStringNotActiveObserver) {
   ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
 
   TestFieldTrialObserver observer;
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "Abc/def/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/",
+                                                     std::set<std::string>()));
   RunLoop().RunUntilIdle();
   // Observer shouldn't be notified.
   EXPECT_TRUE(observer.trial_name().empty());
@@ -673,7 +633,6 @@
       "Unaccepted2/Unaccepted2_name/"
       "Bar/Bar_name/"
       "Unaccepted3/Unaccepted3_name/",
-      FieldTrialList::DONT_ACTIVATE_TRIALS,
       ignored_trial_names);
 
   EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
@@ -1148,9 +1107,8 @@
 
   // Starting with a new blank FieldTrialList.
   FieldTrialList field_trial_list(NULL);
-  ASSERT_TRUE(field_trial_list.CreateTrialsFromString(
-      save_string, FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string,
+                                                      std::set<std::string>()));
 
   FieldTrial::ActiveGroups active_groups;
   field_trial_list.GetActiveFieldTrialGroups(&active_groups);
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index 9b9f99d..0d6287c 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -18,8 +18,11 @@
 #include "base/compiler_specific.h"
 #include "base/debug/alias.h"
 #include "base/logging.h"
+#include "base/memory/ptr_util.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
 #include "base/metrics/sample_vector.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
@@ -37,13 +40,13 @@
                             int* flags,
                             int* declared_min,
                             int* declared_max,
-                            size_t* bucket_count,
+                            uint32_t* bucket_count,
                             uint32_t* range_checksum) {
   if (!iter->ReadString(histogram_name) ||
       !iter->ReadInt(flags) ||
       !iter->ReadInt(declared_min) ||
       !iter->ReadInt(declared_max) ||
-      !iter->ReadSizeT(bucket_count) ||
+      !iter->ReadUInt32(bucket_count) ||
       !iter->ReadUInt32(range_checksum)) {
     DLOG(ERROR) << "Pickle error decoding Histogram: " << *histogram_name;
     return false;
@@ -61,8 +64,7 @@
   }
 
   // We use the arguments to find or create the local version of the histogram
-  // in this process, so we need to clear the IPC flag.
-  DCHECK(*flags & HistogramBase::kIPCSerializationSourceFlag);
+  // in this process, so we need to clear any IPC flag.
   *flags &= ~HistogramBase::kIPCSerializationSourceFlag;
 
   return true;
@@ -82,51 +84,170 @@
 typedef HistogramBase::Sample Sample;
 
 // static
-const size_t Histogram::kBucketCount_MAX = 16384u;
+const uint32_t Histogram::kBucketCount_MAX = 16384u;
 
-HistogramBase* Histogram::FactoryGet(const std::string& name,
-                                     Sample minimum,
-                                     Sample maximum,
-                                     size_t bucket_count,
-                                     int32_t flags) {
-  bool valid_arguments =
-      InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
-  DCHECK(valid_arguments);
+class Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags)
+    : Factory(name, HISTOGRAM, minimum, maximum, bucket_count, flags) {}
+  virtual ~Factory() = default;
 
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    BucketRanges* ranges = new BucketRanges(bucket_count + 1);
-    InitializeBucketRanges(minimum, maximum, ranges);
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
+  // Create histogram based on construction parameters. Caller takes
+  // ownership of the returned object.
+  HistogramBase* Build();
 
-    Histogram* tentative_histogram =
-        new Histogram(name, minimum, maximum, registered_ranges);
+ protected:
+  Factory(const std::string& name,
+          HistogramType histogram_type,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags)
+    : name_(name),
+      histogram_type_(histogram_type),
+      minimum_(minimum),
+      maximum_(maximum),
+      bucket_count_(bucket_count),
+      flags_(flags) {}
 
-    tentative_histogram->SetFlags(flags);
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+  // Create a BucketRanges structure appropriate for this histogram.
+  virtual BucketRanges* CreateRanges() {
+    BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+    Histogram::InitializeBucketRanges(minimum_, maximum_, ranges);
+    return ranges;
   }
 
-  DCHECK_EQ(HISTOGRAM, histogram->GetHistogramType());
-  if (!histogram->HasConstructionArguments(minimum, maximum, bucket_count)) {
+  // Allocate the correct Histogram object off the heap (in case persistent
+  // memory is not available).
+  virtual std::unique_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
+    return WrapUnique(new Histogram(name_, minimum_, maximum_, ranges));
+  }
+
+  // Perform any required datafill on the just-created histogram.  If
+  // overridden, be sure to call the "super" version -- this method may not
+  // always remain empty.
+  virtual void FillHistogram(HistogramBase* /*histogram*/) {}
+
+  // These values are protected (instead of private) because they need to
+  // be accessible to methods of sub-classes in order to avoid passing
+  // unnecessary parameters everywhere.
+  const std::string& name_;
+  const HistogramType histogram_type_;
+  HistogramBase::Sample minimum_;
+  HistogramBase::Sample maximum_;
+  uint32_t bucket_count_;
+  int32_t flags_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+HistogramBase* Histogram::Factory::Build() {
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
+  if (!histogram) {
+    // To avoid racy destruction at shutdown, the following will be leaked.
+    const BucketRanges* created_ranges = CreateRanges();
+    const BucketRanges* registered_ranges =
+        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(created_ranges);
+
+    // In most cases, the bucket-count, minimum, and maximum values are known
+    // when the code is written and so are passed in explicitly. In other
+    // cases (such as with a CustomHistogram), they are calculated dynamically
+    // at run-time. In the latter case, those ctor parameters are zero and
+    // the results extracted from the result of CreateRanges().
+    if (bucket_count_ == 0) {
+      bucket_count_ = static_cast<uint32_t>(registered_ranges->bucket_count());
+      minimum_ = registered_ranges->range(1);
+      maximum_ = registered_ranges->range(bucket_count_ - 1);
+    }
+
+    // Try to create the histogram using a "persistent" allocator. As of
+    // 2016-02-25, the availability of such is controlled by a base::Feature
+    // that is off by default. If the allocator doesn't exist or if
+    // allocating from it fails, code below will allocate the histogram from
+    // the process heap.
+    PersistentHistogramAllocator::Reference histogram_ref = 0;
+    std::unique_ptr<HistogramBase> tentative_histogram;
+    PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+    if (allocator) {
+      tentative_histogram = allocator->AllocateHistogram(
+          histogram_type_,
+          name_,
+          minimum_,
+          maximum_,
+          registered_ranges,
+          flags_,
+          &histogram_ref);
+    }
+
+    // Handle the case where no persistent allocator is present or the
+    // persistent allocation fails (perhaps because it is full).
+    if (!tentative_histogram) {
+      DCHECK(!histogram_ref);  // Should never have been set.
+      DCHECK(!allocator);  // Shouldn't have failed.
+      flags_ &= ~HistogramBase::kIsPersistent;
+      tentative_histogram = HeapAlloc(registered_ranges);
+      tentative_histogram->SetFlags(flags_);
+    }
+
+    FillHistogram(tentative_histogram.get());
+
+    // Register this histogram with the StatisticsRecorder. Keep a copy of
+    // the pointer value to tell later whether the locally created histogram
+    // was registered or deleted. The type is "void" because it could point
+    // to released memory after the following line.
+    const void* tentative_histogram_ptr = tentative_histogram.get();
+    histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+        tentative_histogram.release());
+
+    // Persistent histograms need some follow-up processing.
+    if (histogram_ref) {
+      allocator->FinalizeHistogram(histogram_ref,
+                                   histogram == tentative_histogram_ptr);
+    }
+
+    // Update report on created histograms.
+    ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
+  } else {
+    // Update report on lookup histograms.
+    ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
+  }
+
+  DCHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
+  if (bucket_count_ != 0 &&
+      !histogram->HasConstructionArguments(minimum_, maximum_, bucket_count_)) {
     // The construction arguments do not match the existing histogram.  This can
     // come about if an extension updates in the middle of a chrome run and has
     // changed one of them, or simply by bad code within Chrome itself.  We
     // return NULL here with the expectation that bad code in Chrome will crash
     // on dereference, but extension/Pepper APIs will guard against NULL and not
     // crash.
-    LOG(ERROR) << "Histogram " << name << " has bad construction arguments";
-    return NULL;
+    DLOG(ERROR) << "Histogram " << name_ << " has bad construction arguments";
+    return nullptr;
   }
   return histogram;
 }
 
+HistogramBase* Histogram::FactoryGet(const std::string& name,
+                                     Sample minimum,
+                                     Sample maximum,
+                                     uint32_t bucket_count,
+                                     int32_t flags) {
+  bool valid_arguments =
+      InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
+  DCHECK(valid_arguments);
+
+  return Factory(name, minimum, maximum, bucket_count, flags).Build();
+}
+
 HistogramBase* Histogram::FactoryTimeGet(const std::string& name,
                                          TimeDelta minimum,
                                          TimeDelta maximum,
-                                         size_t bucket_count,
+                                         uint32_t bucket_count,
                                          int32_t flags) {
   return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
                     static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
@@ -136,7 +257,7 @@
 HistogramBase* Histogram::FactoryGet(const char* name,
                                      Sample minimum,
                                      Sample maximum,
-                                     size_t bucket_count,
+                                     uint32_t bucket_count,
                                      int32_t flags) {
   return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
 }
@@ -144,12 +265,27 @@
 HistogramBase* Histogram::FactoryTimeGet(const char* name,
                                          TimeDelta minimum,
                                          TimeDelta maximum,
-                                         size_t bucket_count,
+                                         uint32_t bucket_count,
                                          int32_t flags) {
   return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
                         flags);
 }
 
+std::unique_ptr<HistogramBase> Histogram::PersistentCreate(
+    const std::string& name,
+    Sample minimum,
+    Sample maximum,
+    const BucketRanges* ranges,
+    HistogramBase::AtomicCount* counts,
+    HistogramBase::AtomicCount* logged_counts,
+    uint32_t counts_size,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(new Histogram(name, minimum, maximum, ranges, counts,
+                                  logged_counts, counts_size, meta,
+                                  logged_meta));
+}
+
 // Calculate what range of values are held in each bucket.
 // We have to be careful that we don't pick a ratio between starting points in
 // consecutive buckets that is sooo small, that the integer bounds are the same
@@ -192,10 +328,10 @@
 // static
 const int Histogram::kCommonRaceBasedCountMismatch = 5;
 
-int Histogram::FindCorruption(const HistogramSamples& samples) const {
+uint32_t Histogram::FindCorruption(const HistogramSamples& samples) const {
   int inconsistencies = NO_INCONSISTENCIES;
   Sample previous_range = -1;  // Bottom range is always 0.
-  for (size_t index = 0; index < bucket_count(); ++index) {
+  for (uint32_t index = 0; index < bucket_count(); ++index) {
     int new_range = ranges(index);
     if (previous_range >= new_range)
       inconsistencies |= BUCKET_ORDER_ERROR;
@@ -224,19 +360,19 @@
   return inconsistencies;
 }
 
-Sample Histogram::ranges(size_t i) const {
+Sample Histogram::ranges(uint32_t i) const {
   return bucket_ranges_->range(i);
 }
 
-size_t Histogram::bucket_count() const {
-  return bucket_ranges_->bucket_count();
+uint32_t Histogram::bucket_count() const {
+  return static_cast<uint32_t>(bucket_ranges_->bucket_count());
 }
 
 // static
 bool Histogram::InspectConstructionArguments(const std::string& name,
                                              Sample* minimum,
                                              Sample* maximum,
-                                             size_t* bucket_count) {
+                                             uint32_t* bucket_count) {
   // Defensive code for backward compatibility.
   if (*minimum < 1) {
     DVLOG(1) << "Histogram: " << name << " has bad minimum: " << *minimum;
@@ -256,7 +392,7 @@
     return false;
   if (*bucket_count < 3)
     return false;
-  if (*bucket_count > static_cast<size_t>(*maximum - *minimum + 2))
+  if (*bucket_count > static_cast<uint32_t>(*maximum - *minimum + 2))
     return false;
   return true;
 }
@@ -271,7 +407,7 @@
 
 bool Histogram::HasConstructionArguments(Sample expected_minimum,
                                          Sample expected_maximum,
-                                         size_t expected_bucket_count) const {
+                                         uint32_t expected_bucket_count) const {
   return ((expected_minimum == declared_min_) &&
           (expected_maximum == declared_max_) &&
           (expected_bucket_count == bucket_count()));
@@ -298,10 +434,39 @@
   FindAndRunCallback(value);
 }
 
-scoped_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
+std::unique_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
   return SnapshotSampleVector();
 }
 
+std::unique_ptr<HistogramSamples> Histogram::SnapshotDelta() {
+  DCHECK(!final_delta_created_);
+
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSampleVector();
+  if (!logged_samples_) {
+    // If nothing has been previously logged, save this one as
+    // |logged_samples_| and gather another snapshot to return.
+    logged_samples_.swap(snapshot);
+    return SnapshotSampleVector();
+  }
+
+  // Subtract what was previously logged and update that information.
+  snapshot->Subtract(*logged_samples_);
+  logged_samples_->Add(*snapshot);
+  return snapshot;
+}
+
+std::unique_ptr<HistogramSamples> Histogram::SnapshotFinalDelta() const {
+  DCHECK(!final_delta_created_);
+  final_delta_created_ = true;
+
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSampleVector();
+
+  // Subtract what was previously logged and then return.
+  if (logged_samples_)
+    snapshot->Subtract(*logged_samples_);
+  return snapshot;
+}
+
 void Histogram::AddSamples(const HistogramSamples& samples) {
   samples_->Add(samples);
 }
@@ -328,7 +493,7 @@
       pickle->WriteInt(flags()) &&
       pickle->WriteInt(declared_min()) &&
       pickle->WriteInt(declared_max()) &&
-      pickle->WriteSizeT(bucket_count()) &&
+      pickle->WriteUInt32(bucket_count()) &&
       pickle->WriteUInt32(bucket_ranges()->checksum());
 }
 
@@ -344,10 +509,31 @@
     samples_.reset(new SampleVector(HashMetricName(name), ranges));
 }
 
+Histogram::Histogram(const std::string& name,
+                     Sample minimum,
+                     Sample maximum,
+                     const BucketRanges* ranges,
+                     HistogramBase::AtomicCount* counts,
+                     HistogramBase::AtomicCount* logged_counts,
+                     uint32_t counts_size,
+                     HistogramSamples::Metadata* meta,
+                     HistogramSamples::Metadata* logged_meta)
+  : HistogramBase(name),
+    bucket_ranges_(ranges),
+    declared_min_(minimum),
+    declared_max_(maximum) {
+  if (ranges) {
+    samples_.reset(new SampleVector(HashMetricName(name),
+                                    counts, counts_size, meta, ranges));
+    logged_samples_.reset(new SampleVector(samples_->id(), logged_counts,
+                                           counts_size, logged_meta, ranges));
+  }
+}
+
 Histogram::~Histogram() {
 }
 
-bool Histogram::PrintEmptyBucket(size_t /* index */) const {
+bool Histogram::PrintEmptyBucket(uint32_t /*index*/) const {
   return true;
 }
 
@@ -356,7 +542,7 @@
 // get so big so fast (and we don't expect to see a lot of entries in the large
 // buckets), so we need this to make it possible to see what is going on and
 // not have 0-graphical-height buckets.
-double Histogram::GetBucketSize(Count current, size_t i) const {
+double Histogram::GetBucketSize(Count current, uint32_t i) const {
   DCHECK_GT(ranges(i + 1), ranges(i));
   static const double kTransitionWidth = 5;
   double denominator = ranges(i + 1) - ranges(i);
@@ -365,7 +551,7 @@
   return current/denominator;
 }
 
-const std::string Histogram::GetAsciiBucketRange(size_t i) const {
+const std::string Histogram::GetAsciiBucketRange(uint32_t i) const {
   return GetSimpleAsciiBucketRange(ranges(i));
 }
 
@@ -378,7 +564,7 @@
   int flags;
   int declared_min;
   int declared_max;
-  size_t bucket_count;
+  uint32_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -397,8 +583,8 @@
   return histogram;
 }
 
-scoped_ptr<SampleVector> Histogram::SnapshotSampleVector() const {
-  scoped_ptr<SampleVector> samples(
+std::unique_ptr<SampleVector> Histogram::SnapshotSampleVector() const {
+  std::unique_ptr<SampleVector> samples(
       new SampleVector(samples_->id(), bucket_ranges()));
   samples->Add(*samples_);
   return samples;
@@ -409,7 +595,7 @@
                                std::string* output) const {
   // Get local (stack) copies of all effectively volatile class data so that we
   // are consistent across our output activities.
-  scoped_ptr<SampleVector> snapshot = SnapshotSampleVector();
+  std::unique_ptr<SampleVector> snapshot = SnapshotSampleVector();
   Count sample_count = snapshot->TotalCount();
 
   WriteAsciiHeader(*snapshot, sample_count, output);
@@ -422,7 +608,7 @@
 
   // Calculate space needed to print bucket range numbers.  Leave room to print
   // nearly the largest bucket range without sliding over the histogram.
-  size_t largest_non_empty_bucket = bucket_count() - 1;
+  uint32_t largest_non_empty_bucket = bucket_count() - 1;
   while (0 == snapshot->GetCountAtIndex(largest_non_empty_bucket)) {
     if (0 == largest_non_empty_bucket)
       break;  // All buckets are empty.
@@ -431,7 +617,7 @@
 
   // Calculate largest print width needed for any of our bucket range displays.
   size_t print_width = 1;
-  for (size_t i = 0; i < bucket_count(); ++i) {
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
     if (snapshot->GetCountAtIndex(i)) {
       size_t width = GetAsciiBucketRange(i).size() + 1;
       if (width > print_width)
@@ -442,7 +628,7 @@
   int64_t remaining = sample_count;
   int64_t past = 0;
   // Output the actual histogram graph.
-  for (size_t i = 0; i < bucket_count(); ++i) {
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
     Count current = snapshot->GetCountAtIndex(i);
     if (!current && !PrintEmptyBucket(i))
       continue;
@@ -473,7 +659,7 @@
 
 double Histogram::GetPeakBucketSize(const SampleVector& samples) const {
   double max = 0;
-  for (size_t i = 0; i < bucket_count() ; ++i) {
+  for (uint32_t i = 0; i < bucket_count() ; ++i) {
     double current_size = GetBucketSize(samples.GetCountAtIndex(i), i);
     if (current_size > max)
       max = current_size;
@@ -502,7 +688,7 @@
 void Histogram::WriteAsciiBucketContext(const int64_t past,
                                         const Count current,
                                         const int64_t remaining,
-                                        const size_t i,
+                                        const uint32_t i,
                                         std::string* output) const {
   double scaled_sum = (past + current + remaining) / 100.0;
   WriteAsciiBucketValue(current, scaled_sum, output);
@@ -522,14 +708,14 @@
 void Histogram::GetCountAndBucketData(Count* count,
                                       int64_t* sum,
                                       ListValue* buckets) const {
-  scoped_ptr<SampleVector> snapshot = SnapshotSampleVector();
+  std::unique_ptr<SampleVector> snapshot = SnapshotSampleVector();
   *count = snapshot->TotalCount();
   *sum = snapshot->sum();
-  size_t index = 0;
-  for (size_t i = 0; i < bucket_count(); ++i) {
+  uint32_t index = 0;
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
     Sample count_at_index = snapshot->GetCountAtIndex(i);
     if (count_at_index > 0) {
-      scoped_ptr<DictionaryValue> bucket_value(new DictionaryValue());
+      std::unique_ptr<DictionaryValue> bucket_value(new DictionaryValue());
       bucket_value->SetInteger("low", ranges(i));
       if (i != bucket_count() - 1)
         bucket_value->SetInteger("high", ranges(i + 1));
@@ -545,12 +731,57 @@
 // buckets.
 //------------------------------------------------------------------------------
 
+class LinearHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags,
+          const DescriptionPair* descriptions)
+    : Histogram::Factory(name, LINEAR_HISTOGRAM, minimum, maximum,
+                         bucket_count, flags) {
+    descriptions_ = descriptions;
+  }
+  ~Factory() override = default;
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+    LinearHistogram::InitializeBucketRanges(minimum_, maximum_, ranges);
+    return ranges;
+  }
+
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(
+        new LinearHistogram(name_, minimum_, maximum_, ranges));
+  }
+
+  void FillHistogram(HistogramBase* base_histogram) override {
+    Histogram::Factory::FillHistogram(base_histogram);
+    LinearHistogram* histogram = static_cast<LinearHistogram*>(base_histogram);
+    // Set range descriptions.
+    if (descriptions_) {
+      for (int i = 0; descriptions_[i].description; ++i) {
+        histogram->bucket_description_[descriptions_[i].sample] =
+            descriptions_[i].description;
+      }
+    }
+  }
+
+ private:
+  const DescriptionPair* descriptions_;
+
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
 LinearHistogram::~LinearHistogram() {}
 
 HistogramBase* LinearHistogram::FactoryGet(const std::string& name,
                                            Sample minimum,
                                            Sample maximum,
-                                           size_t bucket_count,
+                                           uint32_t bucket_count,
                                            int32_t flags) {
   return FactoryGetWithRangeDescription(
       name, minimum, maximum, bucket_count, flags, NULL);
@@ -559,7 +790,7 @@
 HistogramBase* LinearHistogram::FactoryTimeGet(const std::string& name,
                                                TimeDelta minimum,
                                                TimeDelta maximum,
-                                               size_t bucket_count,
+                                               uint32_t bucket_count,
                                                int32_t flags) {
   return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
                     static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
@@ -569,7 +800,7 @@
 HistogramBase* LinearHistogram::FactoryGet(const char* name,
                                            Sample minimum,
                                            Sample maximum,
-                                           size_t bucket_count,
+                                           uint32_t bucket_count,
                                            int32_t flags) {
   return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
 }
@@ -577,59 +808,40 @@
 HistogramBase* LinearHistogram::FactoryTimeGet(const char* name,
                                                TimeDelta minimum,
                                                TimeDelta maximum,
-                                               size_t bucket_count,
+                                               uint32_t bucket_count,
                                                int32_t flags) {
   return FactoryTimeGet(std::string(name),  minimum, maximum, bucket_count,
                         flags);
 }
 
+std::unique_ptr<HistogramBase> LinearHistogram::PersistentCreate(
+    const std::string& name,
+    Sample minimum,
+    Sample maximum,
+    const BucketRanges* ranges,
+    HistogramBase::AtomicCount* counts,
+    HistogramBase::AtomicCount* logged_counts,
+    uint32_t counts_size,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(new LinearHistogram(name, minimum, maximum, ranges,
+                                              counts, logged_counts,
+                                              counts_size, meta, logged_meta));
+}
+
 HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
     const std::string& name,
     Sample minimum,
     Sample maximum,
-    size_t bucket_count,
+    uint32_t bucket_count,
     int32_t flags,
     const DescriptionPair descriptions[]) {
   bool valid_arguments = Histogram::InspectConstructionArguments(
       name, &minimum, &maximum, &bucket_count);
   DCHECK(valid_arguments);
 
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    BucketRanges* ranges = new BucketRanges(bucket_count + 1);
-    InitializeBucketRanges(minimum, maximum, ranges);
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
-
-    LinearHistogram* tentative_histogram =
-        new LinearHistogram(name, minimum, maximum, registered_ranges);
-
-    // Set range descriptions.
-    if (descriptions) {
-      for (int i = 0; descriptions[i].description; ++i) {
-        tentative_histogram->bucket_description_[descriptions[i].sample] =
-            descriptions[i].description;
-      }
-    }
-
-    tentative_histogram->SetFlags(flags);
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
-  }
-
-  DCHECK_EQ(LINEAR_HISTOGRAM, histogram->GetHistogramType());
-  if (!histogram->HasConstructionArguments(minimum, maximum, bucket_count)) {
-    // The construction arguments do not match the existing histogram.  This can
-    // come about if an extension updates in the middle of a chrome run and has
-    // changed one of them, or simply by bad code within Chrome itself.  We
-    // return NULL here with the expectation that bad code in Chrome will crash
-    // on dereference, but extension/Pepper APIs will guard against NULL and not
-    // crash.
-    LOG(ERROR) << "Histogram " << name << " has bad construction arguments";
-    return NULL;
-  }
-  return histogram;
+  return Factory(name, minimum, maximum, bucket_count, flags, descriptions)
+      .Build();
 }
 
 HistogramType LinearHistogram::GetHistogramType() const {
@@ -643,7 +855,19 @@
     : Histogram(name, minimum, maximum, ranges) {
 }
 
-double LinearHistogram::GetBucketSize(Count current, size_t i) const {
+LinearHistogram::LinearHistogram(const std::string& name,
+                                 Sample minimum,
+                                 Sample maximum,
+                                 const BucketRanges* ranges,
+                                 HistogramBase::AtomicCount* counts,
+                                 HistogramBase::AtomicCount* logged_counts,
+                                 uint32_t counts_size,
+                                 HistogramSamples::Metadata* meta,
+                                 HistogramSamples::Metadata* logged_meta)
+    : Histogram(name, minimum, maximum, ranges, counts, logged_counts,
+                counts_size, meta, logged_meta) {}
+
+double LinearHistogram::GetBucketSize(Count current, uint32_t i) const {
   DCHECK_GT(ranges(i + 1), ranges(i));
   // Adjacent buckets with different widths would have "surprisingly" many (few)
   // samples in a histogram if we didn't normalize this way.
@@ -651,7 +875,7 @@
   return current/denominator;
 }
 
-const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
+const std::string LinearHistogram::GetAsciiBucketRange(uint32_t i) const {
   int range = ranges(i);
   BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
   if (it == bucket_description_.end())
@@ -659,7 +883,7 @@
   return it->second;
 }
 
-bool LinearHistogram::PrintEmptyBucket(size_t index) const {
+bool LinearHistogram::PrintEmptyBucket(uint32_t index) const {
   return bucket_description_.find(ranges(index)) == bucket_description_.end();
 }
 
@@ -674,6 +898,8 @@
     double linear_range =
         (min * (bucket_count - 1 - i) + max * (i - 1)) / (bucket_count - 2);
     ranges->set_range(i, static_cast<Sample>(linear_range + 0.5));
+    // TODO(bcwhite): Remove once crbug/586622 is fixed.
+    base::debug::Alias(&linear_range);
   }
   ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
   ranges->ResetChecksum();
@@ -685,7 +911,7 @@
   int flags;
   int declared_min;
   int declared_max;
-  size_t bucket_count;
+  uint32_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -706,32 +932,48 @@
 // This section provides implementation for BooleanHistogram.
 //------------------------------------------------------------------------------
 
-HistogramBase* BooleanHistogram::FactoryGet(const std::string& name,
-                                            int32_t flags) {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    BucketRanges* ranges = new BucketRanges(4);
+class BooleanHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name, int32_t flags)
+    : Histogram::Factory(name, BOOLEAN_HISTOGRAM, 1, 2, 3, flags) {}
+  ~Factory() override = default;
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    BucketRanges* ranges = new BucketRanges(3 + 1);
     LinearHistogram::InitializeBucketRanges(1, 2, ranges);
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
-
-    BooleanHistogram* tentative_histogram =
-        new BooleanHistogram(name, registered_ranges);
-
-    tentative_histogram->SetFlags(flags);
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+    return ranges;
   }
 
-  DCHECK_EQ(BOOLEAN_HISTOGRAM, histogram->GetHistogramType());
-  return histogram;
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(new BooleanHistogram(name_, ranges));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+HistogramBase* BooleanHistogram::FactoryGet(const std::string& name,
+                                            int32_t flags) {
+  return Factory(name, flags).Build();
 }
 
 HistogramBase* BooleanHistogram::FactoryGet(const char* name, int32_t flags) {
   return FactoryGet(std::string(name), flags);
 }
 
+std::unique_ptr<HistogramBase> BooleanHistogram::PersistentCreate(
+    const std::string& name,
+    const BucketRanges* ranges,
+    HistogramBase::AtomicCount* counts,
+    HistogramBase::AtomicCount* logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(new BooleanHistogram(
+      name, ranges, counts, logged_counts, meta, logged_meta));
+}
+
 HistogramType BooleanHistogram::GetHistogramType() const {
   return BOOLEAN_HISTOGRAM;
 }
@@ -740,12 +982,21 @@
                                    const BucketRanges* ranges)
     : LinearHistogram(name, 1, 2, ranges) {}
 
+BooleanHistogram::BooleanHistogram(const std::string& name,
+                                   const BucketRanges* ranges,
+                                   HistogramBase::AtomicCount* counts,
+                                   HistogramBase::AtomicCount* logged_counts,
+                                   HistogramSamples::Metadata* meta,
+                                   HistogramSamples::Metadata* logged_meta)
+    : LinearHistogram(name, 1, 2, ranges, counts, logged_counts, 2, meta,
+                      logged_meta) {}
+
 HistogramBase* BooleanHistogram::DeserializeInfoImpl(PickleIterator* iter) {
   std::string histogram_name;
   int flags;
   int declared_min;
   int declared_max;
-  size_t bucket_count;
+  uint32_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -766,30 +1017,51 @@
 // CustomHistogram:
 //------------------------------------------------------------------------------
 
+class CustomHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          const std::vector<Sample>* custom_ranges,
+          int32_t flags)
+    : Histogram::Factory(name, CUSTOM_HISTOGRAM, 0, 0, 0, flags) {
+    custom_ranges_ = custom_ranges;
+  }
+  ~Factory() override = default;
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    // Remove the duplicates in the custom ranges array.
+    std::vector<int> ranges = *custom_ranges_;
+    ranges.push_back(0);  // Ensure we have a zero value.
+    ranges.push_back(HistogramBase::kSampleType_MAX);
+    std::sort(ranges.begin(), ranges.end());
+    ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
+
+    BucketRanges* bucket_ranges = new BucketRanges(ranges.size());
+    for (uint32_t i = 0; i < ranges.size(); i++) {
+      bucket_ranges->set_range(i, ranges[i]);
+    }
+    bucket_ranges->ResetChecksum();
+    return bucket_ranges;
+  }
+
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(new CustomHistogram(name_, ranges));
+  }
+
+ private:
+  const std::vector<Sample>* custom_ranges_;
+
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
 HistogramBase* CustomHistogram::FactoryGet(
     const std::string& name,
     const std::vector<Sample>& custom_ranges,
     int32_t flags) {
   CHECK(ValidateCustomRanges(custom_ranges));
 
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    BucketRanges* ranges = CreateBucketRangesFromCustomRanges(custom_ranges);
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
-
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    CustomHistogram* tentative_histogram =
-        new CustomHistogram(name, registered_ranges);
-
-    tentative_histogram->SetFlags(flags);
-
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
-  }
-
-  DCHECK_EQ(histogram->GetHistogramType(), CUSTOM_HISTOGRAM);
-  return histogram;
+  return Factory(name, &custom_ranges, flags).Build();
 }
 
 HistogramBase* CustomHistogram::FactoryGet(
@@ -799,15 +1071,27 @@
   return FactoryGet(std::string(name), custom_ranges, flags);
 }
 
+std::unique_ptr<HistogramBase> CustomHistogram::PersistentCreate(
+    const std::string& name,
+    const BucketRanges* ranges,
+    HistogramBase::AtomicCount* counts,
+    HistogramBase::AtomicCount* logged_counts,
+    uint32_t counts_size,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(new CustomHistogram(
+      name, ranges, counts, logged_counts, counts_size, meta, logged_meta));
+}
+
 HistogramType CustomHistogram::GetHistogramType() const {
   return CUSTOM_HISTOGRAM;
 }
 
 // static
 std::vector<Sample> CustomHistogram::ArrayToCustomRanges(
-    const Sample* values, size_t num_values) {
+    const Sample* values, uint32_t num_values) {
   std::vector<Sample> all_values;
-  for (size_t i = 0; i < num_values; ++i) {
+  for (uint32_t i = 0; i < num_values; ++i) {
     Sample value = values[i];
     all_values.push_back(value);
 
@@ -825,21 +1109,37 @@
                 ranges->range(ranges->bucket_count() - 1),
                 ranges) {}
 
+CustomHistogram::CustomHistogram(const std::string& name,
+                                 const BucketRanges* ranges,
+                                 HistogramBase::AtomicCount* counts,
+                                 HistogramBase::AtomicCount* logged_counts,
+                                 uint32_t counts_size,
+                                 HistogramSamples::Metadata* meta,
+                                 HistogramSamples::Metadata* logged_meta)
+    : Histogram(name,
+                ranges->range(1),
+                ranges->range(ranges->bucket_count() - 1),
+                ranges,
+                counts,
+                logged_counts,
+                counts_size,
+                meta,
+                logged_meta) {}
+
 bool CustomHistogram::SerializeInfoImpl(Pickle* pickle) const {
   if (!Histogram::SerializeInfoImpl(pickle))
     return false;
 
   // Serialize ranges. First and last ranges are alwasy 0 and INT_MAX, so don't
   // write them.
-  for (size_t i = 1; i < bucket_ranges()->bucket_count(); ++i) {
+  for (uint32_t i = 1; i < bucket_ranges()->bucket_count(); ++i) {
     if (!pickle->WriteInt(bucket_ranges()->range(i)))
       return false;
   }
   return true;
 }
 
-double CustomHistogram::GetBucketSize(Count /* current */,
-                                      size_t /* i */) const {
+double CustomHistogram::GetBucketSize(Count /*current*/, uint32_t /*i*/) const {
   return 1;
 }
 
@@ -849,7 +1149,7 @@
   int flags;
   int declared_min;
   int declared_max;
-  size_t bucket_count;
+  uint32_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -860,7 +1160,7 @@
   // First and last ranges are not serialized.
   std::vector<Sample> sample_ranges(bucket_count - 1);
 
-  for (size_t i = 0; i < sample_ranges.size(); ++i) {
+  for (uint32_t i = 0; i < sample_ranges.size(); ++i) {
     if (!iter->ReadInt(&sample_ranges[i]))
       return NULL;
   }
@@ -878,7 +1178,7 @@
 bool CustomHistogram::ValidateCustomRanges(
     const std::vector<Sample>& custom_ranges) {
   bool has_valid_range = false;
-  for (size_t i = 0; i < custom_ranges.size(); i++) {
+  for (uint32_t i = 0; i < custom_ranges.size(); i++) {
     Sample sample = custom_ranges[i];
     if (sample < 0 || sample > HistogramBase::kSampleType_MAX - 1)
       return false;
@@ -888,22 +1188,4 @@
   return has_valid_range;
 }
 
-// static
-BucketRanges* CustomHistogram::CreateBucketRangesFromCustomRanges(
-      const std::vector<Sample>& custom_ranges) {
-  // Remove the duplicates in the custom ranges array.
-  std::vector<int> ranges = custom_ranges;
-  ranges.push_back(0);  // Ensure we have a zero value.
-  ranges.push_back(HistogramBase::kSampleType_MAX);
-  std::sort(ranges.begin(), ranges.end());
-  ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
-
-  BucketRanges* bucket_ranges = new BucketRanges(ranges.size());
-  for (size_t i = 0; i < ranges.size(); i++) {
-    bucket_ranges->set_range(i, ranges[i]);
-  }
-  bucket_ranges->ResetChecksum();
-  return bucket_ranges;
-}
-
 }  // namespace base
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
index 28bb29b..2283a4d 100644
--- a/base/metrics/histogram.h
+++ b/base/metrics/histogram.h
@@ -70,6 +70,7 @@
 #include <stdint.h>
 
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -78,7 +79,6 @@
 #include "base/gtest_prod_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_base.h"
 // TODO(asvitkine): Migrate callers to to include this directly and remove this.
@@ -92,6 +92,7 @@
 class CustomHistogram;
 class Histogram;
 class LinearHistogram;
+class PersistentMemoryAllocator;
 class Pickle;
 class PickleIterator;
 class SampleVector;
@@ -99,10 +100,12 @@
 class BASE_EXPORT Histogram : public HistogramBase {
  public:
   // Initialize maximum number of buckets in histograms as 16,384.
-  static const size_t kBucketCount_MAX;
+  static const uint32_t kBucketCount_MAX;
 
   typedef std::vector<Count> Counts;
 
+  ~Histogram() override;
+
   //----------------------------------------------------------------------------
   // For a valid histogram, input should follow these restrictions:
   // minimum > 0 (if a minimum below 1 is specified, it will implicitly be
@@ -116,12 +119,12 @@
   static HistogramBase* FactoryGet(const std::string& name,
                                    Sample minimum,
                                    Sample maximum,
-                                   size_t bucket_count,
+                                   uint32_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const std::string& name,
                                        base::TimeDelta minimum,
                                        base::TimeDelta maximum,
-                                       size_t bucket_count,
+                                       uint32_t bucket_count,
                                        int32_t flags);
 
   // Overloads of the above two functions that take a const char* |name| param,
@@ -130,14 +133,26 @@
   static HistogramBase* FactoryGet(const char* name,
                                    Sample minimum,
                                    Sample maximum,
-                                   size_t bucket_count,
+                                   uint32_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const char* name,
                                        base::TimeDelta minimum,
                                        base::TimeDelta maximum,
-                                       size_t bucket_count,
+                                       uint32_t bucket_count,
                                        int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      Sample minimum,
+      Sample maximum,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      uint32_t counts_size,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   static void InitializeBucketRanges(Sample minimum,
                                      Sample maximum,
                                      BucketRanges* ranges);
@@ -155,16 +170,17 @@
   // consistent with the bucket ranges and checksums in our histogram.  This can
   // produce a false-alarm if a race occurred in the reading of the data during
   // a SnapShot process, but should otherwise be false at all times (unless we
-  // have memory over-writes, or DRAM failures).
-  int FindCorruption(const HistogramSamples& samples) const override;
+  // have memory over-writes, or DRAM failures). Flag definitions are located
+  // under "enum Inconsistency" in base/metrics/histogram_base.h.
+  uint32_t FindCorruption(const HistogramSamples& samples) const override;
 
   //----------------------------------------------------------------------------
   // Accessors for factory construction, serialization and testing.
   //----------------------------------------------------------------------------
   Sample declared_min() const { return declared_min_; }
   Sample declared_max() const { return declared_max_; }
-  virtual Sample ranges(size_t i) const;
-  virtual size_t bucket_count() const;
+  virtual Sample ranges(uint32_t i) const;
+  virtual uint32_t bucket_count() const;
   const BucketRanges* bucket_ranges() const { return bucket_ranges_; }
 
   // This function validates histogram construction arguments. It returns false
@@ -176,23 +192,32 @@
   static bool InspectConstructionArguments(const std::string& name,
                                            Sample* minimum,
                                            Sample* maximum,
-                                           size_t* bucket_count);
+                                           uint32_t* bucket_count);
 
   // HistogramBase implementation:
   uint64_t name_hash() const override;
   HistogramType GetHistogramType() const override;
   bool HasConstructionArguments(Sample expected_minimum,
                                 Sample expected_maximum,
-                                size_t expected_bucket_count) const override;
+                                uint32_t expected_bucket_count) const override;
   void Add(Sample value) override;
   void AddCount(Sample value, int count) override;
-  scoped_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
   void AddSamples(const HistogramSamples& samples) override;
   bool AddSamplesFromPickle(base::PickleIterator* iter) override;
   void WriteHTMLGraph(std::string* output) const override;
   void WriteAscii(std::string* output) const override;
 
  protected:
+  // This class, defined entirely within the .cc file, contains all the
+  // common logic for building a Histogram and can be overridden by more
+  // specific types to alter details of how the creation is done. It is
+  // defined as an embedded class (rather than an anonymous one) so it
+  // can access the protected constructors.
+  class Factory;
+
   // |ranges| should contain the underflow and overflow buckets. See top
   // comments for example.
   Histogram(const std::string& name,
@@ -200,30 +225,41 @@
             Sample maximum,
             const BucketRanges* ranges);
 
-  ~Histogram() override;
+  // Traditionally, histograms allocate their own memory for the bucket
+  // vector but "shared" histograms use memory regions allocated from a
+  // special memory segment that is passed in here.  It is assumed that
+  // the life of this memory is managed externally and exceeds the lifetime
+  // of this object. Practically, this memory is never released until the
+  // process exits and the OS cleans it up.
+  Histogram(const std::string& name,
+            Sample minimum,
+            Sample maximum,
+            const BucketRanges* ranges,
+            HistogramBase::AtomicCount* counts,
+            HistogramBase::AtomicCount* logged_counts,
+            uint32_t counts_size,
+            HistogramSamples::Metadata* meta,
+            HistogramSamples::Metadata* logged_meta);
 
   // HistogramBase implementation:
   bool SerializeInfoImpl(base::Pickle* pickle) const override;
 
   // Method to override to skip the display of the i'th bucket if it's empty.
-  virtual bool PrintEmptyBucket(size_t index) const;
+  virtual bool PrintEmptyBucket(uint32_t index) const;
 
   // Get normalized size, relative to the ranges(i).
-  virtual double GetBucketSize(Count current, size_t i) const;
+  virtual double GetBucketSize(Count current, uint32_t i) const;
 
   // Return a string description of what goes in a given bucket.
   // Most commonly this is the numeric value, but in derived classes it may
   // be a name (or string description) given to the bucket.
-  virtual const std::string GetAsciiBucketRange(size_t it) const;
+  virtual const std::string GetAsciiBucketRange(uint32_t it) const;
 
  private:
   // Allow tests to corrupt our innards for testing purposes.
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, BoundsTest);
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, BucketPlacementTest);
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptBucketBounds);
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, NameMatchTest);
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, AddCountTest);
 
   friend class StatisticsRecorder;  // To allow it to delete duplicates.
   friend class StatisticsRecorderTest;
@@ -233,7 +269,7 @@
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
 
   // Implementation of SnapshotSamples function.
-  scoped_ptr<SampleVector> SnapshotSampleVector() const;
+  std::unique_ptr<SampleVector> SnapshotSampleVector() const;
 
   //----------------------------------------------------------------------------
   // Helpers for emitting Ascii graphic.  Each method appends data to output.
@@ -255,7 +291,7 @@
   void WriteAsciiBucketContext(const int64_t past,
                                const Count current,
                                const int64_t remaining,
-                               const size_t i,
+                               const uint32_t i,
                                std::string* output) const;
 
   // WriteJSON calls these.
@@ -273,7 +309,14 @@
 
   // Finally, provide the state that changes with the addition of each new
   // sample.
-  scoped_ptr<SampleVector> samples_;
+  std::unique_ptr<SampleVector> samples_;
+
+  // Also keep a previous uploaded state for calculating deltas.
+  std::unique_ptr<HistogramSamples> logged_samples_;
+
+  // Flag to indicate if PrepareFinalDelta has been previously called. It is
+  // used to DCHECK that a final delta is not created multiple times.
+  mutable bool final_delta_created_ = false;
 
   DISALLOW_COPY_AND_ASSIGN(Histogram);
 };
@@ -291,12 +334,12 @@
   static HistogramBase* FactoryGet(const std::string& name,
                                    Sample minimum,
                                    Sample maximum,
-                                   size_t bucket_count,
+                                   uint32_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const std::string& name,
                                        TimeDelta minimum,
                                        TimeDelta maximum,
-                                       size_t bucket_count,
+                                       uint32_t bucket_count,
                                        int32_t flags);
 
   // Overloads of the above two functions that take a const char* |name| param,
@@ -305,14 +348,26 @@
   static HistogramBase* FactoryGet(const char* name,
                                    Sample minimum,
                                    Sample maximum,
-                                   size_t bucket_count,
+                                   uint32_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const char* name,
                                        TimeDelta minimum,
                                        TimeDelta maximum,
-                                       size_t bucket_count,
+                                       uint32_t bucket_count,
                                        int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      Sample minimum,
+      Sample maximum,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      uint32_t counts_size,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   struct DescriptionPair {
     Sample sample;
     const char* description;  // Null means end of a list of pairs.
@@ -327,7 +382,7 @@
       const std::string& name,
       Sample minimum,
       Sample maximum,
-      size_t bucket_count,
+      uint32_t bucket_count,
       int32_t flags,
       const DescriptionPair descriptions[]);
 
@@ -339,20 +394,32 @@
   HistogramType GetHistogramType() const override;
 
  protected:
+  class Factory;
+
   LinearHistogram(const std::string& name,
                   Sample minimum,
                   Sample maximum,
                   const BucketRanges* ranges);
 
-  double GetBucketSize(Count current, size_t i) const override;
+  LinearHistogram(const std::string& name,
+                  Sample minimum,
+                  Sample maximum,
+                  const BucketRanges* ranges,
+                  HistogramBase::AtomicCount* counts,
+                  HistogramBase::AtomicCount* logged_counts,
+                  uint32_t counts_size,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
+  double GetBucketSize(Count current, uint32_t i) const override;
 
   // If we have a description for a bucket, then return that.  Otherwise
   // let parent class provide a (numeric) description.
-  const std::string GetAsciiBucketRange(size_t i) const override;
+  const std::string GetAsciiBucketRange(uint32_t i) const override;
 
   // Skip printing of name for numeric range if we have a name (and if this is
   // an empty bucket).
-  bool PrintEmptyBucket(size_t index) const override;
+  bool PrintEmptyBucket(uint32_t index) const override;
 
  private:
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
@@ -380,10 +447,28 @@
   // call sites.
   static HistogramBase* FactoryGet(const char* name, int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   HistogramType GetHistogramType() const override;
 
+ protected:
+  class Factory;
+
  private:
   BooleanHistogram(const std::string& name, const BucketRanges* ranges);
+  BooleanHistogram(const std::string& name,
+                   const BucketRanges* ranges,
+                   HistogramBase::AtomicCount* counts,
+                   HistogramBase::AtomicCount* logged_counts,
+                   HistogramSamples::Metadata* meta,
+                   HistogramSamples::Metadata* logged_meta);
 
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
       base::PickleIterator* iter);
@@ -412,6 +497,16 @@
                                    const std::vector<Sample>& custom_ranges,
                                    int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      uint32_t counts_size,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   // Overridden from Histogram:
   HistogramType GetHistogramType() const override;
 
@@ -422,15 +517,25 @@
   // so that invalid samples never fall into the same bucket as valid samples.
   // TODO(kaiwang): Change name to ArrayToCustomEnumRanges.
   static std::vector<Sample> ArrayToCustomRanges(const Sample* values,
-                                                 size_t num_values);
+                                                 uint32_t num_values);
  protected:
+  class Factory;
+
   CustomHistogram(const std::string& name,
                   const BucketRanges* ranges);
 
+  CustomHistogram(const std::string& name,
+                  const BucketRanges* ranges,
+                  HistogramBase::AtomicCount* counts,
+                  HistogramBase::AtomicCount* logged_counts,
+                  uint32_t counts_size,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
   // HistogramBase implementation:
   bool SerializeInfoImpl(base::Pickle* pickle) const override;
 
-  double GetBucketSize(Count current, size_t i) const override;
+  double GetBucketSize(Count current, uint32_t i) const override;
 
  private:
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
@@ -438,8 +543,6 @@
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
 
   static bool ValidateCustomRanges(const std::vector<Sample>& custom_ranges);
-  static BucketRanges* CreateBucketRangesFromCustomRanges(
-      const std::vector<Sample>& custom_ranges);
 
   DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
 };
diff --git a/base/metrics/histogram_base.cc b/base/metrics/histogram_base.cc
index d8aefb1..8c4f1ec 100644
--- a/base/metrics/histogram_base.cc
+++ b/base/metrics/histogram_base.cc
@@ -6,11 +6,11 @@
 
 #include <limits.h>
 
+#include <memory>
 #include <utility>
 
 #include "base/json/json_string_value_serializer.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/sparse_histogram.h"
@@ -34,9 +34,8 @@
       return "CUSTOM_HISTOGRAM";
     case SPARSE_HISTOGRAM:
       return "SPARSE_HISTOGRAM";
-    default:
-      NOTREACHED();
   }
+  NOTREACHED();
   return "UNKNOWN";
 }
 
@@ -62,6 +61,7 @@
 }
 
 const HistogramBase::Sample HistogramBase::kSampleType_MAX = INT_MAX;
+HistogramBase* HistogramBase::report_histogram_ = nullptr;
 
 HistogramBase::HistogramBase(const std::string& name)
     : histogram_name_(name),
@@ -97,7 +97,8 @@
   return SerializeInfoImpl(pickle);
 }
 
-int HistogramBase::FindCorruption(const HistogramSamples& /* samples */) const {
+uint32_t HistogramBase::FindCorruption(
+    const HistogramSamples& /*samples*/) const {
   // Not supported by default.
   return NO_INCONSISTENCIES;
 }
@@ -105,9 +106,9 @@
 void HistogramBase::WriteJSON(std::string* output) const {
   Count count;
   int64_t sum;
-  scoped_ptr<ListValue> buckets(new ListValue());
+  std::unique_ptr<ListValue> buckets(new ListValue());
   GetCountAndBucketData(&count, &sum, buckets.get());
-  scoped_ptr<DictionaryValue> parameters(new DictionaryValue());
+  std::unique_ptr<DictionaryValue> parameters(new DictionaryValue());
   GetParameters(parameters.get());
 
   JSONStringValueSerializer serializer(output);
@@ -122,6 +123,30 @@
   serializer.Serialize(root);
 }
 
+// static
+void HistogramBase::EnableActivityReportHistogram(
+    const std::string& process_type) {
+  DCHECK(!report_histogram_);
+  size_t existing = StatisticsRecorder::GetHistogramCount();
+  if (existing != 0) {
+    DVLOG(1) << existing
+             << " histograms were created before reporting was enabled.";
+  }
+
+  std::string name =
+      "UMA.Histograms.Activity" +
+      (process_type.empty() ? process_type : "." + process_type);
+
+  // Calling FactoryGet() here rather than using a histogram-macro works
+  // around some problems with tests that could end up seeing the results
+  // histogram when not expected due to a bad interaction between
+  // HistogramTester and StatisticsRecorder.
+  report_histogram_ = LinearHistogram::FactoryGet(
+      name, 1, HISTOGRAM_REPORT_MAX, HISTOGRAM_REPORT_MAX + 1,
+      kUmaTargetedHistogramFlag);
+  report_histogram_->Add(HISTOGRAM_REPORT_CREATED);
+}
+
 void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
   if ((flags() & kCallbackExists) == 0)
     return;
@@ -163,4 +188,47 @@
   StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
 }
 
+// static
+void HistogramBase::ReportHistogramActivity(const HistogramBase& histogram,
+                                            ReportActivity activity) {
+  if (!report_histogram_)
+    return;
+
+  const int32_t flags = histogram.flags_;
+  HistogramReport report_type = HISTOGRAM_REPORT_MAX;
+  switch (activity) {
+    case HISTOGRAM_CREATED:
+      report_histogram_->Add(HISTOGRAM_REPORT_HISTOGRAM_CREATED);
+      switch (histogram.GetHistogramType()) {
+        case HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_LOGARITHMIC;
+          break;
+        case LINEAR_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_LINEAR;
+          break;
+        case BOOLEAN_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_BOOLEAN;
+          break;
+        case CUSTOM_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_CUSTOM;
+          break;
+        case SPARSE_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_SPARSE;
+          break;
+      }
+      report_histogram_->Add(report_type);
+      if (flags & kIsPersistent)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_PERSISTENT);
+      if ((flags & kUmaStabilityHistogramFlag) == kUmaStabilityHistogramFlag)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_UMA_STABILITY);
+      else if (flags & kUmaTargetedHistogramFlag)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_UMA_TARGETED);
+      break;
+
+    case HISTOGRAM_LOOKUP:
+      report_histogram_->Add(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP);
+      break;
+  }
+}
+
 }  // namespace base
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
index 4fa07c6..d240099 100644
--- a/base/metrics/histogram_base.h
+++ b/base/metrics/histogram_base.h
@@ -9,18 +9,19 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 #include "base/time/time.h"
 
 namespace base {
 
+class BucketRanges;
 class DictionaryValue;
 class HistogramBase;
 class HistogramSamples;
@@ -29,7 +30,7 @@
 class PickleIterator;
 
 ////////////////////////////////////////////////////////////////////////////////
-// These enums are used to facilitate deserialization of histograms from other
+// This enum is used to facilitate deserialization of histograms from other
 // processes into the browser. If you create another class that inherits from
 // HistogramBase, add new histogram types and names below.
 
@@ -43,6 +44,39 @@
 
 std::string HistogramTypeToString(HistogramType type);
 
+// This enum is used for reporting how many histograms and of what types and
+// variations are being created. It has to be in the main .h file so it is
+// visible to files that define the various histogram types.
+enum HistogramReport {
+  // Count the number of reports created. The other counts divided by this
+  // number will give the average per run of the program.
+  HISTOGRAM_REPORT_CREATED = 0,
+
+  // Count the total number of histograms created. It is the limit against
+  // which all others are compared.
+  HISTOGRAM_REPORT_HISTOGRAM_CREATED = 1,
+
+  // Count the total number of histograms looked-up. It's better to cache
+  // the result of a single lookup rather than do it repeatedly.
+  HISTOGRAM_REPORT_HISTOGRAM_LOOKUP = 2,
+
+  // These count the individual histogram types. This must follow the order
+  // of HistogramType above.
+  HISTOGRAM_REPORT_TYPE_LOGARITHMIC = 3,
+  HISTOGRAM_REPORT_TYPE_LINEAR = 4,
+  HISTOGRAM_REPORT_TYPE_BOOLEAN = 5,
+  HISTOGRAM_REPORT_TYPE_CUSTOM = 6,
+  HISTOGRAM_REPORT_TYPE_SPARSE = 7,
+
+  // These indicate the individual flags that were set.
+  HISTOGRAM_REPORT_FLAG_UMA_TARGETED = 8,
+  HISTOGRAM_REPORT_FLAG_UMA_STABILITY = 9,
+  HISTOGRAM_REPORT_FLAG_PERSISTENT = 10,
+
+  // This must be last.
+  HISTOGRAM_REPORT_MAX = 11
+};
+
 // Create or find existing histogram that matches the pickled info.
 // Returns NULL if the pickled data has problems.
 BASE_EXPORT HistogramBase* DeserializeHistogramInfo(base::PickleIterator* iter);
@@ -81,19 +115,26 @@
     // to shortcut looking up the callback if it doesn't exist.
     kCallbackExists = 0x20,
 
+    // Indicates that the histogram is held in "persistent" memory and may
+    // be accessible between processes. This is only possible if such a
+    // memory segment has been created/attached, used to create a Persistent-
+    // MemoryAllocator, and that loaded into the Histogram module before this
+    // histogram is created.
+    kIsPersistent = 0x40,
+
     // Only for Histogram and its sub classes: fancy bucket-naming support.
     kHexRangePrintingFlag = 0x8000,
   };
 
   // Histogram data inconsistency types.
-  enum Inconsistency {
+  enum Inconsistency : uint32_t {
     NO_INCONSISTENCIES = 0x0,
     RANGE_CHECKSUM_ERROR = 0x1,
     BUCKET_ORDER_ERROR = 0x2,
     COUNT_HIGH_ERROR = 0x4,
     COUNT_LOW_ERROR = 0x8,
 
-    NEVER_EXCEEDED_VALUE = 0x10
+    NEVER_EXCEEDED_VALUE = 0x10,
   };
 
   explicit HistogramBase(const std::string& name);
@@ -119,9 +160,10 @@
   // Whether the histogram has construction arguments as parameters specified.
   // For histograms that don't have the concept of minimum, maximum or
   // bucket_count, this function always returns false.
-  virtual bool HasConstructionArguments(Sample expected_minimum,
-                                        Sample expected_maximum,
-                                        size_t expected_bucket_count) const = 0;
+  virtual bool HasConstructionArguments(
+      Sample expected_minimum,
+      Sample expected_maximum,
+      uint32_t expected_bucket_count) const = 0;
 
   virtual void Add(Sample value) = 0;
 
@@ -145,11 +187,25 @@
 
   // Try to find out data corruption from histogram and the samples.
   // The returned value is a combination of Inconsistency enum.
-  virtual int FindCorruption(const HistogramSamples& samples) const;
+  virtual uint32_t FindCorruption(const HistogramSamples& samples) const;
 
   // Snapshot the current complete set of sample data.
   // Override with atomic/locked snapshot if needed.
-  virtual scoped_ptr<HistogramSamples> SnapshotSamples() const = 0;
+  virtual std::unique_ptr<HistogramSamples> SnapshotSamples() const = 0;
+
+  // Calculate the change (delta) in histogram counts since the previous call
+  // to this method. Each successive call will return only those counts
+  // changed since the last call.
+  virtual std::unique_ptr<HistogramSamples> SnapshotDelta() = 0;
+
+  // Calculate the change (delta) in histogram counts since the previous call
+  // to SnapshotDelta() but do so without modifying any internal data as to
+  // what was previous logged. After such a call, no further calls to this
+  // method or to SnapshotDelta() should be done as the result would include
+  // data previously returned. Because no internal data is changed, this call
+  // can be made on "const" histograms such as those with data held in
+  // read-only memory.
+  virtual std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const = 0;
 
   // The following methods provide graphical histogram displays.
   virtual void WriteHTMLGraph(std::string* output) const = 0;
@@ -160,7 +216,17 @@
   // customize the output.
   void WriteJSON(std::string* output) const;
 
+  // This enables a histogram that reports what types of histograms are
+  // created and their flags. It must be called while still single-threaded.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histogram:
+  //    UMA.Histograms.Activity.process_type
+  static void EnableActivityReportHistogram(const std::string& process_type);
+
  protected:
+  enum ReportActivity { HISTOGRAM_CREATED, HISTOGRAM_LOOKUP };
+
   // Subclasses should implement this function to make SerializeInfo work.
   virtual bool SerializeInfoImpl(base::Pickle* pickle) const = 0;
 
@@ -192,7 +258,16 @@
   // passing |sample| as the parameter.
   void FindAndRunCallback(Sample sample) const;
 
+  // Update report with an |activity| that occurred for |histogram|.
+  static void ReportHistogramActivity(const HistogramBase& histogram,
+                                      ReportActivity activity);
+
+  // Retrieves the global histogram reporting what histograms are created.
+  static HistogramBase* report_histogram_;
+
  private:
+  friend class HistogramBaseTest;
+
   const std::string histogram_name_;
   AtomicCount flags_;
 
diff --git a/base/metrics/histogram_base_unittest.cc b/base/metrics/histogram_base_unittest.cc
index 2d6b6df..1eb8fd4 100644
--- a/base/metrics/histogram_base_unittest.cc
+++ b/base/metrics/histogram_base_unittest.cc
@@ -18,19 +18,29 @@
   HistogramBaseTest() {
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
-    statistics_recorder_ = NULL;
     ResetStatisticsRecorder();
   }
 
-  ~HistogramBaseTest() override { delete statistics_recorder_; }
+  ~HistogramBaseTest() override {
+    HistogramBase::report_histogram_ = nullptr;
+  }
 
   void ResetStatisticsRecorder() {
-    delete statistics_recorder_;
-    statistics_recorder_ = new StatisticsRecorder();
+    // It is necessary to fully destruct any existing StatisticsRecorder
+    // before creating a new one.
+    statistics_recorder_.reset();
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+  }
+
+  HistogramBase* GetCreationReportHistogram(const std::string& name) {
+    HistogramBase::EnableActivityReportHistogram(name);
+    return HistogramBase::report_histogram_;
   }
 
  private:
-  StatisticsRecorder* statistics_recorder_;
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramBaseTest);
 };
 
 TEST_F(HistogramBaseTest, DeserializeHistogram) {
@@ -152,4 +162,61 @@
   EXPECT_EQ(0, deserialized->flags());
 }
 
+TEST_F(HistogramBaseTest, CreationReportHistogram) {
+  // Enabled creation report. Itself is not included in the report.
+  HistogramBase* report = GetCreationReportHistogram("CreationReportTest");
+  ASSERT_TRUE(report);
+
+  std::vector<HistogramBase::Sample> ranges;
+  ranges.push_back(1);
+  ranges.push_back(2);
+  ranges.push_back(4);
+  ranges.push_back(8);
+  ranges.push_back(10);
+
+  // Create all histogram types and verify counts.
+  Histogram::FactoryGet("CRH-Histogram", 1, 10, 5, 0);
+  LinearHistogram::FactoryGet("CRH-Linear", 1, 10, 5, 0);
+  BooleanHistogram::FactoryGet("CRH-Boolean", 0);
+  CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
+  SparseHistogram::FactoryGet("CRH-Sparse", 0);
+
+  std::unique_ptr<HistogramSamples> samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_LOGARITHMIC));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_LINEAR));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_BOOLEAN));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_CUSTOM));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_SPARSE));
+
+  // Create all flag types and verify counts.
+  Histogram::FactoryGet("CRH-Histogram-UMA-Targeted", 1, 10, 5,
+                        HistogramBase::kUmaTargetedHistogramFlag);
+  Histogram::FactoryGet("CRH-Histogram-UMA-Stability", 1, 10, 5,
+                        HistogramBase::kUmaStabilityHistogramFlag);
+  SparseHistogram::FactoryGet("CRH-Sparse-UMA-Targeted",
+                              HistogramBase::kUmaTargetedHistogramFlag);
+  SparseHistogram::FactoryGet("CRH-Sparse-UMA-Stability",
+                              HistogramBase::kUmaStabilityHistogramFlag);
+  samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(9, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+  EXPECT_EQ(2, samples->GetCount(HISTOGRAM_REPORT_FLAG_UMA_TARGETED));
+  EXPECT_EQ(2, samples->GetCount(HISTOGRAM_REPORT_FLAG_UMA_STABILITY));
+
+  // Do lookup of existing histograms and verify counts.
+  Histogram::FactoryGet("CRH-Histogram", 1, 10, 5, 0);
+  LinearHistogram::FactoryGet("CRH-Linear", 1, 10, 5, 0);
+  BooleanHistogram::FactoryGet("CRH-Boolean", 0);
+  CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
+  SparseHistogram::FactoryGet("CRH-Sparse", 0);
+  samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(9, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+}
+
 }  // namespace base
diff --git a/base/metrics/histogram_delta_serialization.cc b/base/metrics/histogram_delta_serialization.cc
new file mode 100644
index 0000000..3e5d154
--- /dev/null
+++ b/base/metrics/histogram_delta_serialization.cc
@@ -0,0 +1,123 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_delta_serialization.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/pickle.h"
+#include "base/values.h"
+
+namespace base {
+
+namespace {
+
+// Create or find existing histogram and add the samples from pickle.
+// Silently returns when seeing any data problem in the pickle.
+void DeserializeHistogramAndAddSamples(PickleIterator* iter) {
+  HistogramBase* histogram = DeserializeHistogramInfo(iter);
+  if (!histogram)
+    return;
+
+  if (histogram->flags() & HistogramBase::kIPCSerializationSourceFlag) {
+    DVLOG(1) << "Single process mode, histogram observed and not copied: "
+             << histogram->histogram_name();
+    return;
+  }
+  histogram->AddSamplesFromPickle(iter);
+}
+
+}  // namespace
+
+HistogramDeltaSerialization::HistogramDeltaSerialization(
+    const std::string& caller_name)
+    : histogram_snapshot_manager_(this),
+      serialized_deltas_(NULL) {
+  inconsistencies_histogram_ =
+      LinearHistogram::FactoryGet(
+          "Histogram.Inconsistencies" + caller_name, 1,
+          HistogramBase::NEVER_EXCEEDED_VALUE,
+          HistogramBase::NEVER_EXCEEDED_VALUE + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+
+  inconsistencies_unique_histogram_ =
+      LinearHistogram::FactoryGet(
+          "Histogram.Inconsistencies" + caller_name + "Unique", 1,
+          HistogramBase::NEVER_EXCEEDED_VALUE,
+          HistogramBase::NEVER_EXCEEDED_VALUE + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+
+  inconsistent_snapshot_histogram_ =
+      Histogram::FactoryGet(
+          "Histogram.InconsistentSnapshot" + caller_name, 1, 1000000, 50,
+          HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+HistogramDeltaSerialization::~HistogramDeltaSerialization() {
+}
+
+void HistogramDeltaSerialization::PrepareAndSerializeDeltas(
+    std::vector<std::string>* serialized_deltas,
+    bool include_persistent) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  serialized_deltas_ = serialized_deltas;
+  // Note: Before serializing, we set the kIPCSerializationSourceFlag for all
+  // the histograms, so that the receiving process can distinguish them from the
+  // local histograms.
+  histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(include_persistent), StatisticsRecorder::end(),
+      Histogram::kIPCSerializationSourceFlag, Histogram::kNoFlags);
+  serialized_deltas_ = NULL;
+}
+
+// static
+void HistogramDeltaSerialization::DeserializeAndAddSamples(
+    const std::vector<std::string>& serialized_deltas) {
+  for (std::vector<std::string>::const_iterator it = serialized_deltas.begin();
+       it != serialized_deltas.end(); ++it) {
+    Pickle pickle(it->data(), checked_cast<int>(it->size()));
+    PickleIterator iter(pickle);
+    DeserializeHistogramAndAddSamples(&iter);
+  }
+}
+
+void HistogramDeltaSerialization::RecordDelta(
+    const HistogramBase& histogram,
+    const HistogramSamples& snapshot) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_NE(0, snapshot.TotalCount());
+
+  Pickle pickle;
+  histogram.SerializeInfo(&pickle);
+  snapshot.Serialize(&pickle);
+  serialized_deltas_->push_back(
+      std::string(static_cast<const char*>(pickle.data()), pickle.size()));
+}
+
+void HistogramDeltaSerialization::InconsistencyDetected(
+    HistogramBase::Inconsistency problem) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistencies_histogram_->Add(problem);
+}
+
+void HistogramDeltaSerialization::UniqueInconsistencyDetected(
+    HistogramBase::Inconsistency problem) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistencies_unique_histogram_->Add(problem);
+}
+
+void HistogramDeltaSerialization::InconsistencyDetectedInLoggedCount(
+    int amount) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistent_snapshot_histogram_->Add(std::abs(amount));
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_delta_serialization.h b/base/metrics/histogram_delta_serialization.h
index 0a3983f..3bb04cb 100644
--- a/base/metrics/histogram_delta_serialization.h
+++ b/base/metrics/histogram_delta_serialization.h
@@ -5,12 +5,12 @@
 #ifndef BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
 #define BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/base_export.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_flattener.h"
 #include "base/metrics/histogram_snapshot_manager.h"
 #include "base/threading/thread_checker.h"
@@ -28,9 +28,12 @@
 
   // Computes deltas in histogram bucket counts relative to the previous call to
   // this method. Stores the deltas in serialized form into |serialized_deltas|.
-  // If |serialized_deltas| is NULL, no data is serialized, though the next call
-  // will compute the deltas relative to this one.
-  void PrepareAndSerializeDeltas(std::vector<std::string>* serialized_deltas);
+  // If |serialized_deltas| is null, no data is serialized, though the next call
+  // will compute the deltas relative to this one. Setting |include_persistent|
+  // will include histograms held in persistent memory (and thus may be reported
+  // elsewhere); otherwise only histograms local to this process are serialized.
+  void PrepareAndSerializeDeltas(std::vector<std::string>* serialized_deltas,
+                                 bool include_persistent);
 
   // Deserialize deltas and add samples to corresponding histograms, creating
   // them if necessary. Silently ignores errors in |serialized_deltas|.
diff --git a/base/metrics/histogram_delta_serialization_unittest.cc b/base/metrics/histogram_delta_serialization_unittest.cc
new file mode 100644
index 0000000..719bc70
--- /dev/null
+++ b/base/metrics/histogram_delta_serialization_unittest.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_delta_serialization.h"
+
+#include <vector>
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/statistics_recorder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(HistogramDeltaSerializationTest, DeserializeHistogramAndAddSamples) {
+  std::unique_ptr<StatisticsRecorder> statistic_recorder(
+      StatisticsRecorder::CreateTemporaryForTesting());
+  HistogramDeltaSerialization serializer("HistogramDeltaSerializationTest");
+  std::vector<std::string> deltas;
+  // Nothing was changed yet.
+  serializer.PrepareAndSerializeDeltas(&deltas, true);
+  EXPECT_TRUE(deltas.empty());
+
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kIPCSerializationSourceFlag);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(100);
+  histogram->Add(1000);
+
+  serializer.PrepareAndSerializeDeltas(&deltas, true);
+  EXPECT_FALSE(deltas.empty());
+
+  HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
+
+  // The histogram has kIPCSerializationSourceFlag. So samples will be ignored.
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(1, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(10));
+  EXPECT_EQ(1, snapshot->GetCount(100));
+  EXPECT_EQ(1, snapshot->GetCount(1000));
+
+  // Clear kIPCSerializationSourceFlag to emulate multi-process usage.
+  histogram->ClearFlags(HistogramBase::kIPCSerializationSourceFlag);
+  HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
+
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(2, snapshot2->GetCount(1));
+  EXPECT_EQ(2, snapshot2->GetCount(10));
+  EXPECT_EQ(2, snapshot2->GetCount(100));
+  EXPECT_EQ(2, snapshot2->GetCount(1000));
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index 0492f0c..ce1811a 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -10,6 +10,11 @@
 #include "base/metrics/histogram.h"
 #include "base/time/time.h"
 
+// Macros for efficient use of histograms. See documentation in histogram.h.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is defined in sparse_histogram.h as it has
+// different #include dependencies.
+
 //------------------------------------------------------------------------------
 // Histograms are often put in areas where they are called many many times, and
 // performance is critical.  As a result, they are designed to have a very low
@@ -67,18 +72,24 @@
 // a macro argument here.  The name is only used in a DCHECK, to assure that
 // callers don't try to vary the name of the histogram (which would tend to be
 // ignored by the one-time initialization of the histogtram_pointer).
-#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,           \
-                                       histogram_add_method_invocation,   \
-                                       histogram_factory_get_invocation)  \
+
+// In some cases (integration into 3rd party code), it's useful to separate the
+// definition of |atomic_histogram_pointer| from its use. To achieve this we
+// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
+// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
+// and forwards to HISTOGRAM_POINTER_USE.
+#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer,                   \
+                              constant_histogram_name,                    \
+                              histogram_add_method_invocation,            \
+                              histogram_factory_get_invocation)           \
   do {                                                                    \
-    static base::subtle::AtomicWord atomic_histogram_pointer = 0;         \
     base::HistogramBase* histogram_pointer(                               \
         reinterpret_cast<base::HistogramBase*>(                           \
-            base::subtle::Acquire_Load(&atomic_histogram_pointer)));      \
+            base::subtle::Acquire_Load(atomic_histogram_pointer)));       \
     if (!histogram_pointer) {                                             \
       histogram_pointer = histogram_factory_get_invocation;               \
       base::subtle::Release_Store(                                        \
-          &atomic_histogram_pointer,                                      \
+          atomic_histogram_pointer,                                       \
           reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
     }                                                                     \
     if (DCHECK_IS_ON())                                                   \
@@ -86,9 +97,27 @@
     histogram_pointer->histogram_add_method_invocation;                   \
   } while (0)
 
+// Defines the static |atomic_histogram_pointer| and forwards to
+// HISTOGRAM_POINTER_USE.
+#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,               \
+                                       histogram_add_method_invocation,       \
+                                       histogram_factory_get_invocation)      \
+  do {                                                                        \
+    static base::subtle::AtomicWord atomic_histogram_pointer = 0;             \
+    HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name, \
+                          histogram_add_method_invocation,                    \
+                          histogram_factory_get_invocation);                  \
+  } while (0)
+
 //------------------------------------------------------------------------------
 // Provide easy general purpose histogram in a macro, just like stats counters.
-// The first four macros use 50 buckets.
+// Most of these macros use 50 buckets, but check the definition for details.
+//
+// All of these macros must be called with |name| as a runtime constant --- it
+// doesn't have to literally be a constant, but it must be the same string on
+// all calls from a particular call site. If this rule is violated,
+// STATIC_HISTOGRAM_POINTER_BLOCK will DCHECK, and if DCHECKS are disabled, the
+// data will be written to the wrong histogram.
 
 #define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
     name, sample, base::TimeDelta::FromMilliseconds(1), \
diff --git a/base/metrics/histogram_samples.cc b/base/metrics/histogram_samples.cc
index 61efd23..ea3b987 100644
--- a/base/metrics/histogram_samples.cc
+++ b/base/metrics/histogram_samples.cc
@@ -73,28 +73,19 @@
 HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
     : meta_(meta) {
   DCHECK(meta_->id == 0 || meta_->id == id);
-  meta_->id = id;
+
+  // It's possible that |meta| is contained in initialized, read-only memory
+  // so it's essential that no write be done in that case.
+  if (!meta_->id)
+    meta_->id = id;
 }
 
 HistogramSamples::~HistogramSamples() {}
 
-// Despite using atomic operations, the increment/add actions below are *not*
-// atomic! Race conditions may cause loss of samples or even completely corrupt
-// the 64-bit sum on 32-bit machines. This is done intentionally to reduce the
-// cost of these operations that could be executed in performance-significant
-//  points of the code.
-//
-// TODO(bcwhite): Gather quantitative information as to the cost of using
-// proper atomic increments and improve either globally or for those histograms
-// that really need it.
-
 void HistogramSamples::Add(const HistogramSamples& other) {
-  meta_->sum += other.sum();
-
-  HistogramBase::Count old_redundant_count =
-      subtle::NoBarrier_Load(&meta_->redundant_count);
-  subtle::NoBarrier_Store(&meta_->redundant_count,
-      old_redundant_count + other.redundant_count());
+  IncreaseSum(other.sum());
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    other.redundant_count());
   bool success = AddSubtractImpl(other.Iterator().get(), ADD);
   DCHECK(success);
 }
@@ -106,39 +97,32 @@
   if (!iter->ReadInt64(&sum) || !iter->ReadInt(&redundant_count))
     return false;
 
-  meta_->sum += sum;
-
-  HistogramBase::Count old_redundant_count =
-      subtle::NoBarrier_Load(&meta_->redundant_count);
-  subtle::NoBarrier_Store(&meta_->redundant_count,
-                          old_redundant_count + redundant_count);
+  IncreaseSum(sum);
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    redundant_count);
 
   SampleCountPickleIterator pickle_iter(iter);
   return AddSubtractImpl(&pickle_iter, ADD);
 }
 
 void HistogramSamples::Subtract(const HistogramSamples& other) {
-  meta_->sum -= other.sum();
-
-  HistogramBase::Count old_redundant_count =
-      subtle::NoBarrier_Load(&meta_->redundant_count);
-  subtle::NoBarrier_Store(&meta_->redundant_count,
-                          old_redundant_count - other.redundant_count());
+  IncreaseSum(-other.sum());
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    -other.redundant_count());
   bool success = AddSubtractImpl(other.Iterator().get(), SUBTRACT);
   DCHECK(success);
 }
 
 bool HistogramSamples::Serialize(Pickle* pickle) const {
-  if (!pickle->WriteInt64(meta_->sum))
+  if (!pickle->WriteInt64(sum()))
     return false;
-  if (!pickle->WriteInt(subtle::NoBarrier_Load(&meta_->redundant_count)))
+  if (!pickle->WriteInt(redundant_count()))
     return false;
 
   HistogramBase::Sample min;
   HistogramBase::Sample max;
   HistogramBase::Count count;
-  for (scoped_ptr<SampleCountIterator> it = Iterator();
-       !it->Done();
+  for (std::unique_ptr<SampleCountIterator> it = Iterator(); !it->Done();
        it->Next()) {
     it->Get(&min, &max, &count);
     if (!pickle->WriteInt(min) ||
@@ -150,17 +134,20 @@
 }
 
 void HistogramSamples::IncreaseSum(int64_t diff) {
+#ifdef ARCH_CPU_64_BITS
+  subtle::NoBarrier_AtomicIncrement(&meta_->sum, diff);
+#else
   meta_->sum += diff;
+#endif
 }
 
 void HistogramSamples::IncreaseRedundantCount(HistogramBase::Count diff) {
-  subtle::NoBarrier_Store(&meta_->redundant_count,
-      subtle::NoBarrier_Load(&meta_->redundant_count) + diff);
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, diff);
 }
 
 SampleCountIterator::~SampleCountIterator() {}
 
-bool SampleCountIterator::GetBucketIndex(size_t* /* index */) const {
+bool SampleCountIterator::GetBucketIndex(size_t* /*index*/) const {
   DCHECK(!Done());
   return false;
 }
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
index 3da3e2d..e28573f 100644
--- a/base/metrics/histogram_samples.h
+++ b/base/metrics/histogram_samples.h
@@ -8,9 +8,10 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
+
 #include "base/atomicops.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 
 namespace base {
@@ -19,7 +20,10 @@
 class PickleIterator;
 class SampleCountIterator;
 
-// HistogramSamples is a container storing all samples of a histogram.
+// HistogramSamples is a container storing all samples of a histogram. All
+// elements must be of a fixed width to ensure 32/64-bit interoperability.
+// If this structure changes, bump the version number for kTypeIdHistogram
+// in persistent_histogram_allocator.cc.
 class BASE_EXPORT HistogramSamples {
  public:
   struct Metadata {
@@ -34,8 +38,13 @@
     // accuracy of this value; there may be races during histogram
     // accumulation and snapshotting that we choose to accept. It should
     // be treated as approximate.
-    // TODO(bcwhite): Change this to std::atomic<int64_t>.
+#ifdef ARCH_CPU_64_BITS
+    subtle::Atomic64 sum;
+#else
+    // 32-bit systems don't have atomic 64-bit operations. Use a basic type
+    // and don't worry about "shearing".
     int64_t sum;
+#endif
 
     // A "redundant" count helps identify memory corruption. It redundantly
     // stores the total number of samples accumulated in the histogram. We
@@ -65,12 +74,18 @@
 
   virtual void Subtract(const HistogramSamples& other);
 
-  virtual scoped_ptr<SampleCountIterator> Iterator() const = 0;
+  virtual std::unique_ptr<SampleCountIterator> Iterator() const = 0;
   virtual bool Serialize(Pickle* pickle) const;
 
   // Accessor fuctions.
   uint64_t id() const { return meta_->id; }
-  int64_t sum() const { return meta_->sum; }
+  int64_t sum() const {
+#ifdef ARCH_CPU_64_BITS
+    return subtle::NoBarrier_Load(&meta_->sum);
+#else
+    return meta_->sum;
+#endif
+  }
   HistogramBase::Count redundant_count() const {
     return subtle::NoBarrier_Load(&meta_->redundant_count);
   }
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
index 02f87f0..340505e 100644
--- a/base/metrics/histogram_snapshot_manager.cc
+++ b/base/metrics/histogram_snapshot_manager.cc
@@ -4,7 +4,9 @@
 
 #include "base/metrics/histogram_snapshot_manager.h"
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
+#include "base/debug/alias.h"
 #include "base/metrics/histogram_flattener.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/statistics_recorder.h"
@@ -19,77 +21,72 @@
 }
 
 HistogramSnapshotManager::~HistogramSnapshotManager() {
-  STLDeleteValues(&logged_samples_);
 }
 
-void HistogramSnapshotManager::PrepareDeltas(
-    HistogramBase::Flags flag_to_set,
-    HistogramBase::Flags required_flags) {
-  StatisticsRecorder::Histograms histograms;
-  StatisticsRecorder::GetHistograms(&histograms);
-  for (StatisticsRecorder::Histograms::const_iterator it = histograms.begin();
-       histograms.end() != it;
-       ++it) {
-    (*it)->SetFlags(flag_to_set);
-    if (((*it)->flags() & required_flags) == required_flags)
-      PrepareDelta(**it);
-  }
+void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
+  PrepareSamples(histogram, histogram->SnapshotDelta());
 }
 
-void HistogramSnapshotManager::PrepareDelta(const HistogramBase& histogram) {
+void HistogramSnapshotManager::PrepareFinalDelta(
+    const HistogramBase* histogram) {
+  PrepareSamples(histogram, histogram->SnapshotFinalDelta());
+}
+
+void HistogramSnapshotManager::PrepareSamples(
+    const HistogramBase* histogram,
+    std::unique_ptr<HistogramSamples> samples) {
   DCHECK(histogram_flattener_);
 
-  // Get up-to-date snapshot of sample stats.
-  scoped_ptr<HistogramSamples> snapshot(histogram.SnapshotSamples());
+  // Get information known about this histogram. If it did not previously
+  // exist, one will be created and initialized.
+  SampleInfo* sample_info = &known_histograms_[histogram->name_hash()];
 
   // Crash if we detect that our histograms have been overwritten.  This may be
   // a fair distance from the memory smasher, but we hope to correlate these
   // crashes with other events, such as plugins, or usage patterns, etc.
-  int corruption = histogram.FindCorruption(*snapshot);
+  uint32_t corruption = histogram->FindCorruption(*samples);
   if (HistogramBase::BUCKET_ORDER_ERROR & corruption) {
+    // Extract fields useful during debug.
+    const BucketRanges* ranges =
+        static_cast<const Histogram*>(histogram)->bucket_ranges();
+    std::vector<HistogramBase::Sample> ranges_copy;
+    for (size_t i = 0; i < ranges->size(); ++i)
+      ranges_copy.push_back(ranges->range(i));
+    HistogramBase::Sample* ranges_ptr = &ranges_copy[0];
+    const char* histogram_name = histogram->histogram_name().c_str();
+    int32_t flags = histogram->flags();
     // The checksum should have caught this, so crash separately if it didn't.
-    CHECK_NE(0, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
+    CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
     CHECK(false);  // Crash for the bucket order corruption.
+    // Ensure that compiler keeps around pointers to |histogram| and its
+    // internal |bucket_ranges_| for any minidumps.
+    base::debug::Alias(&ranges_ptr);
+    base::debug::Alias(&histogram_name);
+    base::debug::Alias(&flags);
   }
   // Checksum corruption might not have caused order corruption.
-  CHECK_EQ(0, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
+  CHECK_EQ(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
 
   // Note, at this point corruption can only be COUNT_HIGH_ERROR or
   // COUNT_LOW_ERROR and they never arise together, so we don't need to extract
   // bits from corruption.
-  const uint64_t histogram_hash = histogram.name_hash();
   if (corruption) {
-    DLOG(ERROR) << "Histogram: " << histogram.histogram_name()
-                << " has data corruption: " << corruption;
+    DLOG(ERROR) << "Histogram: \"" << histogram->histogram_name()
+                << "\" has data corruption: " << corruption;
     histogram_flattener_->InconsistencyDetected(
         static_cast<HistogramBase::Inconsistency>(corruption));
     // Don't record corrupt data to metrics services.
-    int old_corruption = inconsistencies_[histogram_hash];
+    const uint32_t old_corruption = sample_info->inconsistencies;
     if (old_corruption == (corruption | old_corruption))
       return;  // We've already seen this corruption for this histogram.
-    inconsistencies_[histogram_hash] |= corruption;
+    sample_info->inconsistencies |= corruption;
     histogram_flattener_->UniqueInconsistencyDetected(
         static_cast<HistogramBase::Inconsistency>(corruption));
     return;
   }
 
-  HistogramSamples* to_log;
-  auto it = logged_samples_.find(histogram_hash);
-  if (it == logged_samples_.end()) {
-    to_log = snapshot.release();
-
-    // This histogram has not been logged before, add a new entry.
-    logged_samples_[histogram_hash] = to_log;
-  } else {
-    HistogramSamples* already_logged = it->second;
-    InspectLoggedSamplesInconsistency(*snapshot, already_logged);
-    snapshot->Subtract(*already_logged);
-    already_logged->Add(*snapshot);
-    to_log = snapshot.get();
-  }
-
-  if (to_log->TotalCount() > 0)
-    histogram_flattener_->RecordDelta(histogram, *to_log);
+  if (samples->TotalCount() > 0)
+    histogram_flattener_->RecordDelta(*histogram, *samples);
 }
 
 void HistogramSnapshotManager::InspectLoggedSamplesInconsistency(
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
index bad4668..26fb93f 100644
--- a/base/metrics/histogram_snapshot_manager.h
+++ b/base/metrics/histogram_snapshot_manager.h
@@ -9,7 +9,9 @@
 
 #include <map>
 #include <string>
+#include <vector>
 
+#include "base/gtest_prod_util.h"
 #include "base/macros.h"
 #include "base/metrics/histogram_base.h"
 
@@ -36,26 +38,51 @@
   // Only histograms that have all the flags specified by the argument will be
   // chosen. If all histograms should be recorded, set it to
   // |Histogram::kNoFlags|.
-  void PrepareDeltas(HistogramBase::Flags flags_to_set,
-                     HistogramBase::Flags required_flags);
+  template <class ForwardHistogramIterator>
+  void PrepareDeltas(ForwardHistogramIterator begin,
+                     ForwardHistogramIterator end,
+                     HistogramBase::Flags flags_to_set,
+                     HistogramBase::Flags required_flags) {
+    for (ForwardHistogramIterator it = begin; it != end; ++it) {
+      (*it)->SetFlags(flags_to_set);
+      if (((*it)->flags() & required_flags) == required_flags)
+        PrepareDelta(*it);
+    }
+  }
+
+  // When the collection is not so simple as can be done using a single
+  // iterator, the steps can be performed separately. Call PrepareDelta()
+  // as many times as necessary. PrepareFinalDelta() works like PrepareDelta()
+  // except that it does not update the previous logged values and can thus
+  // be used with read-only files.
+  void PrepareDelta(HistogramBase* histogram);
+  void PrepareFinalDelta(const HistogramBase* histogram);
 
  private:
-  // Snapshot this histogram, and record the delta.
-  void PrepareDelta(const HistogramBase& histogram);
+  FRIEND_TEST_ALL_PREFIXES(HistogramSnapshotManagerTest, CheckMerge);
+
+  // During a snapshot, samples are acquired and aggregated. This structure
+  // contains all the information for a given histogram that persists between
+  // collections.
+  struct SampleInfo {
+    // The set of inconsistencies (flags) already seen for the histogram.
+    // See HistogramBase::Inconsistency for values.
+    uint32_t inconsistencies = 0;
+  };
+
+  // Capture and hold samples from a histogram. This does all the heavy
+  // lifting for PrepareDelta() and PrepareFinalDelta().
+  void PrepareSamples(const HistogramBase* histogram,
+                      std::unique_ptr<HistogramSamples> samples);
 
   // Try to detect and fix count inconsistency of logged samples.
   void InspectLoggedSamplesInconsistency(
       const HistogramSamples& new_snapshot,
       HistogramSamples* logged_samples);
 
-  // For histograms, track what we've already recorded (as a sample for
-  // each histogram) so that we can record only the delta with the next log.
-  // The information is indexed by the hash of the histogram name.
-  std::map<uint64_t, HistogramSamples*> logged_samples_;
-
-  // Set of histograms found to be corrupt and their problems, indexed
+  // For histograms, track what has been previously seen, indexed
   // by the hash of the histogram name.
-  std::map<uint64_t, int> inconsistencies_;
+  std::map<uint64_t, SampleInfo> known_histograms_;
 
   // |histogram_flattener_| handles the logistics of recording the histogram
   // deltas.
diff --git a/base/metrics/histogram_snapshot_manager_unittest.cc b/base/metrics/histogram_snapshot_manager_unittest.cc
index b6a367a..3c13e1a 100644
--- a/base/metrics/histogram_snapshot_manager_unittest.cc
+++ b/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -10,7 +10,9 @@
 #include "base/macros.h"
 #include "base/metrics/histogram_delta_serialization.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/metrics/sample_vector.h"
 #include "base/metrics/statistics_recorder.h"
+#include "base/stl_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -22,6 +24,12 @@
   void RecordDelta(const HistogramBase& histogram,
                    const HistogramSamples& snapshot) override {
     recorded_delta_histogram_names_.push_back(histogram.histogram_name());
+    // Use CHECK instead of ASSERT to get full stack-trace and thus origin.
+    CHECK(!ContainsKey(recorded_delta_histogram_sum_,
+                       histogram.histogram_name()));
+    // Keep a copy of the snapshot's sum for testing; the snapshot itself is
+    // owned by the snapshot-manager and is not retained here.
+    recorded_delta_histogram_sum_[histogram.histogram_name()] = snapshot.sum();
   }
 
   void InconsistencyDetected(HistogramBase::Inconsistency problem) override {
@@ -37,12 +45,23 @@
     ASSERT_TRUE(false);
   }
 
+  void Reset() {
+    recorded_delta_histogram_names_.clear();
+    recorded_delta_histogram_sum_.clear();
+  }
+
   std::vector<std::string> GetRecordedDeltaHistogramNames() {
     return recorded_delta_histogram_names_;
   }
 
+  int64_t GetRecordedDeltaHistogramSum(const std::string& name) {
+    EXPECT_TRUE(ContainsKey(recorded_delta_histogram_sum_, name));
+    return recorded_delta_histogram_sum_[name];
+  }
+
  private:
   std::vector<std::string> recorded_delta_histogram_names_;
+  std::map<std::string, int64_t> recorded_delta_histogram_sum_;
 
   DISALLOW_COPY_AND_ASSIGN(HistogramFlattenerDeltaRecorder);
 };
@@ -50,22 +69,24 @@
 class HistogramSnapshotManagerTest : public testing::Test {
  protected:
   HistogramSnapshotManagerTest()
-      : histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
+      : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()),
+        histogram_snapshot_manager_(&histogram_flattener_delta_recorder_) {}
 
   ~HistogramSnapshotManagerTest() override {}
 
-  StatisticsRecorder statistics_recorder_;
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
   HistogramFlattenerDeltaRecorder histogram_flattener_delta_recorder_;
   HistogramSnapshotManager histogram_snapshot_manager_;
 };
 
 TEST_F(HistogramSnapshotManagerTest, PrepareDeltasNoFlagsFilter) {
   // kNoFlags filter should record all histograms.
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 2);
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
   UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
 
-  histogram_snapshot_manager_.PrepareDeltas(HistogramBase::kNoFlags,
-                                            HistogramBase::kNoFlags);
+  histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(false), StatisticsRecorder::end(),
+      HistogramBase::kNoFlags, HistogramBase::kNoFlags);
 
   const std::vector<std::string>& histograms =
       histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
@@ -76,10 +97,11 @@
 
 TEST_F(HistogramSnapshotManagerTest, PrepareDeltasUmaHistogramFlagFilter) {
   // Note that kUmaStabilityHistogramFlag includes kUmaTargetedHistogramFlag.
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 2);
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
   UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
 
   histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(false), StatisticsRecorder::end(),
       HistogramBase::kNoFlags, HistogramBase::kUmaTargetedHistogramFlag);
 
   const std::vector<std::string>& histograms =
@@ -91,10 +113,11 @@
 
 TEST_F(HistogramSnapshotManagerTest,
        PrepareDeltasUmaStabilityHistogramFlagFilter) {
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 2);
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
   UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
 
   histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(false), StatisticsRecorder::end(),
       HistogramBase::kNoFlags, HistogramBase::kUmaStabilityHistogramFlag);
 
   const std::vector<std::string>& histograms =
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index 2fadc30..5c2ca68 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -8,46 +8,93 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <algorithm>
 #include <climits>
+#include <memory>
+#include <string>
 #include <vector>
 
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
 #include "base/metrics/sample_vector.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
+#include "base/strings/stringprintf.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 
-class HistogramTest : public testing::Test {
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class HistogramTest : public testing::TestWithParam<bool> {
  protected:
+  const int32_t kAllocatorMemorySize = 8 << 20;  // 8 MiB
+
+  HistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
   void SetUp() override {
+    if (use_persistent_histogram_allocator_)
+      CreatePersistentHistogramAllocator();
+
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
     InitializeStatisticsRecorder();
   }
 
-  void TearDown() override { UninitializeStatisticsRecorder(); }
+  void TearDown() override {
+    if (allocator_) {
+      ASSERT_FALSE(allocator_->IsFull());
+      ASSERT_FALSE(allocator_->IsCorrupt());
+    }
+    UninitializeStatisticsRecorder();
+    DestroyPersistentHistogramAllocator();
+  }
 
   void InitializeStatisticsRecorder() {
-    statistics_recorder_ = new StatisticsRecorder();
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
   }
 
   void UninitializeStatisticsRecorder() {
-    delete statistics_recorder_;
-    statistics_recorder_ = NULL;
+    statistics_recorder_.reset();
   }
 
-  StatisticsRecorder* statistics_recorder_;
+  void CreatePersistentHistogramAllocator() {
+    // By getting the results-histogram before any persistent allocator
+    // is attached, that histogram is guaranteed not to be stored in
+    // any persistent memory segment (which simplifies some tests).
+    GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
+
+    GlobalHistogramAllocator::CreateWithLocalMemory(
+        kAllocatorMemorySize, 0, "HistogramAllocatorTest");
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
+  }
+
+  void DestroyPersistentHistogramAllocator() {
+    allocator_ = nullptr;
+    GlobalHistogramAllocator::ReleaseForTesting();
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<char[]> allocator_memory_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HistogramTest);
 };
 
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent, HistogramTest, testing::Bool());
+
+
 // Check for basic syntax and use.
-TEST_F(HistogramTest, BasicTest) {
+TEST_P(HistogramTest, BasicTest) {
   // Try basic construction
   HistogramBase* histogram = Histogram::FactoryGet(
       "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
@@ -64,6 +111,14 @@
       "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
   EXPECT_TRUE(custom_histogram);
 
+  // Macros that create histograms have an internal static variable which will
+  // continue to point to those from the very first run of this method even
+  // during subsequent runs.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
   // Use standard macros (but with fixed samples)
   LOCAL_HISTOGRAM_TIMES("Test2Histogram", TimeDelta::FromDays(1));
   LOCAL_HISTOGRAM_COUNTS("Test3Histogram", 30);
@@ -73,18 +128,81 @@
 
 // Check that the macro correctly matches histograms by name and records their
 // data together.
-TEST_F(HistogramTest, NameMatchTest) {
+TEST_P(HistogramTest, NameMatchTest) {
+  // Macros that create histograms have an internal static variable which will
+  // continue to point to those from the very first run of this method even
+  // during subsequent runs.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
   LOCAL_HISTOGRAM_PERCENTAGE("DuplicatedHistogram", 10);
   LOCAL_HISTOGRAM_PERCENTAGE("DuplicatedHistogram", 10);
   HistogramBase* histogram = LinearHistogram::FactoryGet(
       "DuplicatedHistogram", 1, 101, 102, HistogramBase::kNoFlags);
 
-  scoped_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
   EXPECT_EQ(2, samples->TotalCount());
   EXPECT_EQ(2, samples->GetCount(10));
 }
 
-TEST_F(HistogramTest, ExponentialRangesTest) {
+// Check that delta calculations work correctly.
+TEST_P(HistogramTest, DeltaTest) {
+  HistogramBase* histogram =
+      Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
+                            HistogramBase::kNoFlags);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(50);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+
+  histogram->Add(10);
+  histogram->Add(10);
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+  EXPECT_EQ(2, samples->GetCount(10));
+
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+}
+
+// Check that final-delta calculations work correctly.
+TEST_P(HistogramTest, FinalDeltaTest) {
+  HistogramBase* histogram =
+      Histogram::FactoryGet("FinalDeltaHistogram", 1, 64, 8,
+                            HistogramBase::kNoFlags);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(50);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+
+  histogram->Add(2);
+  histogram->Add(50);
+
+  samples = histogram->SnapshotFinalDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(2));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+}
+
+TEST_P(HistogramTest, ExponentialRangesTest) {
   // Check that we got a nice exponential when there was enough room.
   BucketRanges ranges(9);
   Histogram::InitializeBucketRanges(1, 64, &ranges);
@@ -129,7 +247,7 @@
   EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
 }
 
-TEST_F(HistogramTest, LinearRangesTest) {
+TEST_P(HistogramTest, LinearRangesTest) {
   BucketRanges ranges(9);
   LinearHistogram::InitializeBucketRanges(1, 7, &ranges);
   // Gets a nice linear set of bucket ranges.
@@ -157,7 +275,7 @@
   EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
 }
 
-TEST_F(HistogramTest, ArrayToCustomRangesTest) {
+TEST_P(HistogramTest, ArrayToCustomRangesTest) {
   const HistogramBase::Sample ranges[3] = {5, 10, 20};
   std::vector<HistogramBase::Sample> ranges_vec =
       CustomHistogram::ArrayToCustomRanges(ranges, 3);
@@ -170,7 +288,7 @@
   EXPECT_EQ(21, ranges_vec[5]);
 }
 
-TEST_F(HistogramTest, CustomHistogramTest) {
+TEST_P(HistogramTest, CustomHistogramTest) {
   // A well prepared custom ranges.
   std::vector<HistogramBase::Sample> custom_ranges;
   custom_ranges.push_back(1);
@@ -216,7 +334,7 @@
   EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(3));
 }
 
-TEST_F(HistogramTest, CustomHistogramWithOnly2Buckets) {
+TEST_P(HistogramTest, CustomHistogramWithOnly2Buckets) {
   // This test exploits the fact that the CustomHistogram can have 2 buckets,
   // while the base class Histogram is *supposed* to have at least 3 buckets.
   // We should probably change the restriction on the base class (or not inherit
@@ -235,8 +353,7 @@
   EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(2));
 }
 
-// Test the AddCount function.
-TEST_F(HistogramTest, AddCountTest) {
+TEST_P(HistogramTest, AddCountTest) {
   const size_t kBucketCount = 50;
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("AddCountHistogram", 10, 100, kBucketCount,
@@ -245,7 +362,7 @@
   histogram->AddCount(20, 15);
   histogram->AddCount(30, 14);
 
-  scoped_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
   EXPECT_EQ(29, samples->TotalCount());
   EXPECT_EQ(15, samples->GetCount(20));
   EXPECT_EQ(14, samples->GetCount(30));
@@ -253,14 +370,38 @@
   histogram->AddCount(20, 25);
   histogram->AddCount(30, 24);
 
-  scoped_ptr<SampleVector> samples2 = histogram->SnapshotSampleVector();
+  std::unique_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
   EXPECT_EQ(78, samples2->TotalCount());
   EXPECT_EQ(40, samples2->GetCount(20));
   EXPECT_EQ(38, samples2->GetCount(30));
 }
 
+TEST_P(HistogramTest, AddCount_LargeValuesDontOverflow) {
+  const size_t kBucketCount = 50;
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("AddCountHistogram", 10, 1000000000, kBucketCount,
+                            HistogramBase::kNoFlags));
+
+  histogram->AddCount(200000000, 15);
+  histogram->AddCount(300000000, 14);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  EXPECT_EQ(29, samples->TotalCount());
+  EXPECT_EQ(15, samples->GetCount(200000000));
+  EXPECT_EQ(14, samples->GetCount(300000000));
+
+  histogram->AddCount(200000000, 25);
+  histogram->AddCount(300000000, 24);
+
+  std::unique_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
+  EXPECT_EQ(78, samples2->TotalCount());
+  EXPECT_EQ(40, samples2->GetCount(200000000));
+  EXPECT_EQ(38, samples2->GetCount(300000000));
+  EXPECT_EQ(19400000000LL, samples2->sum());
+}
+
 // Make sure histogram handles out-of-bounds data gracefully.
-TEST_F(HistogramTest, BoundsTest) {
+TEST_P(HistogramTest, BoundsTest) {
   const size_t kBucketCount = 50;
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Bounded", 10, 100, kBucketCount,
@@ -274,7 +415,7 @@
   histogram->Add(10000);
 
   // Verify they landed in the underflow, and overflow buckets.
-  scoped_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+  std::unique_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
   EXPECT_EQ(2, samples->GetCountAtIndex(0));
   EXPECT_EQ(0, samples->GetCountAtIndex(1));
   size_t array_size = histogram->bucket_count();
@@ -298,7 +439,7 @@
   test_custom_histogram->Add(INT_MAX);
 
   // Verify they landed in the underflow, and overflow buckets.
-  scoped_ptr<SampleVector> custom_samples =
+  std::unique_ptr<SampleVector> custom_samples =
       test_custom_histogram->SnapshotSampleVector();
   EXPECT_EQ(2, custom_samples->GetCountAtIndex(0));
   EXPECT_EQ(0, custom_samples->GetCountAtIndex(1));
@@ -308,7 +449,7 @@
 }
 
 // Check to be sure samples land as expected is "correct" buckets.
-TEST_F(HistogramTest, BucketPlacementTest) {
+TEST_P(HistogramTest, BucketPlacementTest) {
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
 
@@ -322,12 +463,19 @@
   }
 
   // Check to see that the bucket counts reflect our additions.
-  scoped_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+  std::unique_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
   for (int i = 0; i < 8; i++)
     EXPECT_EQ(i + 1, samples->GetCountAtIndex(i));
 }
 
-TEST_F(HistogramTest, CorruptSampleCounts) {
+TEST_P(HistogramTest, CorruptSampleCounts) {
+  // The internal code creates histograms via macros and thus keeps static
+  // pointers to them. If those pointers are to persistent memory which will
+  // be free'd then any following calls to that code will crash with a
+  // segmentation violation.
+  if (use_persistent_histogram_allocator_)
+    return;
+
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
 
@@ -335,7 +483,7 @@
   histogram->Add(20);
   histogram->Add(40);
 
-  scoped_ptr<SampleVector> snapshot = histogram->SnapshotSampleVector();
+  std::unique_ptr<SampleVector> snapshot = histogram->SnapshotSampleVector();
   EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
             histogram->FindCorruption(*snapshot));
   EXPECT_EQ(2, snapshot->redundant_count());
@@ -354,11 +502,11 @@
             histogram->FindCorruption(*snapshot));
 }
 
-TEST_F(HistogramTest, CorruptBucketBounds) {
+TEST_P(HistogramTest, CorruptBucketBounds) {
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
 
-  scoped_ptr<SampleVector> snapshot = histogram->SnapshotSampleVector();
+  std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotSamples();
   EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
             histogram->FindCorruption(*snapshot));
 
@@ -373,7 +521,7 @@
 
   bucket_ranges->set_range(2, bucket_ranges->range(1));
   bucket_ranges->set_range(1, tmp);
-  EXPECT_EQ(0, histogram->FindCorruption(*snapshot));
+  EXPECT_EQ(0U, histogram->FindCorruption(*snapshot));
 
   // Show that two simple changes don't offset each other
   bucket_ranges->set_range(3, bucket_ranges->range(3) + 1);
@@ -389,7 +537,7 @@
   bucket_ranges->set_range(4, bucket_ranges->range(4) + 1);
 }
 
-TEST_F(HistogramTest, HistogramSerializeInfo) {
+TEST_P(HistogramTest, HistogramSerializeInfo) {
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8,
                             HistogramBase::kIPCSerializationSourceFlag));
@@ -408,7 +556,8 @@
 
   int flag;
   EXPECT_TRUE(iter.ReadInt(&flag));
-  EXPECT_EQ(HistogramBase::kIPCSerializationSourceFlag, flag);
+  EXPECT_EQ(HistogramBase::kIPCSerializationSourceFlag,
+            flag & ~HistogramBase::kIsPersistent);
 
   int min;
   EXPECT_TRUE(iter.ReadInt(&min));
@@ -418,9 +567,9 @@
   EXPECT_TRUE(iter.ReadInt(&max));
   EXPECT_EQ(64, max);
 
-  int64_t bucket_count;
-  EXPECT_TRUE(iter.ReadInt64(&bucket_count));
-  EXPECT_EQ(8, bucket_count);
+  uint32_t bucket_count;
+  EXPECT_TRUE(iter.ReadUInt32(&bucket_count));
+  EXPECT_EQ(8u, bucket_count);
 
   uint32_t checksum;
   EXPECT_TRUE(iter.ReadUInt32(&checksum));
@@ -430,7 +579,7 @@
   EXPECT_FALSE(iter.SkipBytes(1));
 }
 
-TEST_F(HistogramTest, CustomHistogramSerializeInfo) {
+TEST_P(HistogramTest, CustomHistogramSerializeInfo) {
   std::vector<int> custom_ranges;
   custom_ranges.push_back(10);
   custom_ranges.push_back(100);
@@ -447,12 +596,12 @@
 
   int i;
   std::string s;
-  int64_t bucket_count;
+  uint32_t bucket_count;
   uint32_t ui32;
   EXPECT_TRUE(iter.ReadInt(&i) && iter.ReadString(&s) && iter.ReadInt(&i) &&
               iter.ReadInt(&i) && iter.ReadInt(&i) &&
-              iter.ReadInt64(&bucket_count) && iter.ReadUInt32(&ui32));
-  EXPECT_EQ(3, bucket_count);
+              iter.ReadUInt32(&bucket_count) && iter.ReadUInt32(&ui32));
+  EXPECT_EQ(3u, bucket_count);
 
   int range;
   EXPECT_TRUE(iter.ReadInt(&range));
@@ -464,7 +613,7 @@
   EXPECT_FALSE(iter.SkipBytes(1));
 }
 
-TEST_F(HistogramTest, BadConstruction) {
+TEST_P(HistogramTest, BadConstruction) {
   HistogramBase* histogram = Histogram::FactoryGet(
       "BadConstruction", 0, 100, 8, HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram->HasConstructionArguments(1, 100, 8));
@@ -490,6 +639,68 @@
   EXPECT_EQ(NULL, bad_histogram);
 }
 
+TEST_P(HistogramTest, FactoryTime) {
+  const int kTestCreateCount = 1 << 14;  // Must be power-of-2.
+  const int kTestLookupCount = 100000;
+  const int kTestAddCount = 1000000;
+
+  // Create all histogram names in advance for accurate timing below.
+  std::vector<std::string> histogram_names;
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    histogram_names.push_back(
+        StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+  }
+
+  // Calculate cost of creating histograms.
+  TimeTicks create_start = TimeTicks::Now();
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    Histogram::FactoryGet(histogram_names[i], 1, 100, 10,
+                          HistogramBase::kNoFlags);
+  }
+  TimeDelta create_ticks = TimeTicks::Now() - create_start;
+  int64_t create_ms = create_ticks.InMilliseconds();
+
+  VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+          << "ms or about "
+          << (create_ms * 1000000) / kTestCreateCount
+          << "ns each.";
+
+  // Calculate cost of looking up existing histograms.
+  TimeTicks lookup_start = TimeTicks::Now();
+  for (int i = 0; i < kTestLookupCount; ++i) {
+    // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+    // order less likely to be cacheable (but still hit them all) should the
+    // underlying storage use the exact histogram name as the key.
+    const int i_mult = 6007;
+    static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+    int index = (i * i_mult) & (kTestCreateCount - 1);
+    Histogram::FactoryGet(histogram_names[index], 1, 100, 10,
+                          HistogramBase::kNoFlags);
+  }
+  TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+  int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+  VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+          << "ms or about "
+          << (lookup_ms * 1000000) / kTestLookupCount
+          << "ns each.";
+
+  // Calculate cost of accessing histograms.
+  HistogramBase* histogram = Histogram::FactoryGet(
+      histogram_names[0], 1, 100, 10, HistogramBase::kNoFlags);
+  ASSERT_TRUE(histogram);
+  TimeTicks add_start = TimeTicks::Now();
+  for (int i = 0; i < kTestAddCount; ++i)
+    histogram->Add(i & 127);
+  TimeDelta add_ticks = TimeTicks::Now() - add_start;
+  int64_t add_ms = add_ticks.InMilliseconds();
+
+  VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+          << "ms or about "
+          << (add_ms * 1000000) / kTestAddCount
+          << "ns each.";
+}
+
 #if GTEST_HAS_DEATH_TEST
 // For Histogram, LinearHistogram and CustomHistogram, the minimum for a
 // declared range is 1, while the maximum is (HistogramBase::kSampleType_MAX -
diff --git a/base/metrics/metrics_hashes.cc b/base/metrics/metrics_hashes.cc
index 73bce2e..5672b06 100644
--- a/base/metrics/metrics_hashes.cc
+++ b/base/metrics/metrics_hashes.cc
@@ -22,9 +22,9 @@
 
 }  // namespace
 
-uint64_t HashMetricName(const std::string& name) {
+uint64_t HashMetricName(base::StringPiece name) {
   base::MD5Digest digest;
-  base::MD5Sum(name.c_str(), name.size(), &digest);
+  base::MD5Sum(name.data(), name.size(), &digest);
   return DigestToUInt64(digest);
 }
 
diff --git a/base/metrics/metrics_hashes.h b/base/metrics/metrics_hashes.h
index bd04017..d05c4ba 100644
--- a/base/metrics/metrics_hashes.h
+++ b/base/metrics/metrics_hashes.h
@@ -6,15 +6,15 @@
 #define BASE_METRICS_METRICS_HASHES_H_
 
 #include <stdint.h>
-#include <string>
 
 #include "base/base_export.h"
+#include "base/strings/string_piece.h"
 
 namespace base {
 
 // Computes a uint64_t hash of a given string based on its MD5 hash. Suitable
 // for metric names.
-BASE_EXPORT uint64_t HashMetricName(const std::string& name);
+BASE_EXPORT uint64_t HashMetricName(base::StringPiece name);
 
 }  // namespace metrics
 
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
new file mode 100644
index 0000000..5af3486
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -0,0 +1,866 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_allocator.h"
+
+#include <memory>
+
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/files/important_file_writer.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_sample_map.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+namespace {
+
+// Name of histogram for storing results of local operations.
+const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
+
+// Type identifiers used when storing in persistent memory so they can be
+// identified during extraction; the first 4 bytes of the SHA1 of the name
+// is used as a unique integer. A "version number" is added to the base
+// so that, if the structure of that object changes, stored older versions
+// will be safely ignored.
+enum : uint32_t {
+  kTypeIdHistogram   = 0xF1645910 + 2,  // SHA1(Histogram)   v2
+  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
+  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
+};
+
+// The current globally-active persistent allocator for all new histograms.
+// The object held here will obviously not be destructed at process exit
+// but that's best since PersistentMemoryAllocator objects (that underlie
+// GlobalHistogramAllocator objects) are explicitly forbidden from doing
+// anything essential at exit anyway due to the fact that they depend on data
+// managed elsewhere and which could be destructed first.
+GlobalHistogramAllocator* g_allocator = nullptr;
+
+// Take an array of range boundaries and create a proper BucketRanges object
+// which is returned to the caller. A return of nullptr indicates that the
+// passed boundaries are invalid.
+std::unique_ptr<BucketRanges> CreateRangesFromData(
+    HistogramBase::Sample* ranges_data,
+    uint32_t ranges_checksum,
+    size_t count) {
+  // To avoid racy destruction at shutdown, the following may be leaked.
+  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
+  DCHECK_EQ(count, ranges->size());
+  for (size_t i = 0; i < count; ++i) {
+    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
+      return nullptr;
+    ranges->set_range(i, ranges_data[i]);
+  }
+
+  ranges->ResetChecksum();
+  if (ranges->checksum() != ranges_checksum)
+    return nullptr;
+
+  return ranges;
+}
+
+// Calculate the number of bytes required to store all of a histogram's
+// "counts". This will return zero (0) if |bucket_count| is not valid.
+size_t CalculateRequiredCountsBytes(size_t bucket_count) {
+  // 2 because each "sample count" also requires a backup "logged count"
+  // used for calculating the delta during snapshot operations.
+  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
+
+  // If the |bucket_count| is such that it would overflow the return type,
+  // perhaps as the result of a malicious actor, then return zero to
+  // indicate the problem to the caller.
+  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
+    return 0;
+
+  return bucket_count * kBytesPerBucket;
+}
+
+}  // namespace
+
+const Feature kPersistentHistogramsFeature{
+  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
+};
+
+
+PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
+    PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), record_iterator_(allocator) {}
+
+PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() {}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
+                                                          const void* user) {
+  base::AutoLock auto_lock(lock_);
+  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
+}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
+    uint64_t id) {
+  lock_.AssertAcquired();
+
+  auto found = sample_records_.find(id);
+  if (found != sample_records_.end())
+    return found->second.get();
+
+  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
+  samples = WrapUnique(new PersistentSampleMapRecords(this, id));
+  return samples.get();
+}
+
+bool PersistentSparseHistogramDataManager::LoadRecords(
+    PersistentSampleMapRecords* sample_map_records) {
+  // DataManager must be locked in order to access the found_ field of any
+  // PersistentSampleMapRecords object.
+  base::AutoLock auto_lock(lock_);
+  bool found = false;
+
+  // If there are already "found" entries for the passed object, move them.
+  if (!sample_map_records->found_.empty()) {
+    sample_map_records->records_.reserve(sample_map_records->records_.size() +
+                                         sample_map_records->found_.size());
+    sample_map_records->records_.insert(sample_map_records->records_.end(),
+                                        sample_map_records->found_.begin(),
+                                        sample_map_records->found_.end());
+    sample_map_records->found_.clear();
+    found = true;
+  }
+
+  // Acquiring a lock is a semi-expensive operation so load some records with
+  // each call. More than this number may be loaded if it takes longer to
+  // find at least one matching record for the passed object.
+  const int kMinimumNumberToLoad = 10;
+  const uint64_t match_id = sample_map_records->sample_map_id_;
+
+  // Loop while no entry is found OR we haven't yet loaded the minimum number.
+  // This will continue reading even after a match is found.
+  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
+    // Get the next sample-record. The iterator will always resume from where
+    // it left off even if it previously had nothing further to return.
+    uint64_t found_id;
+    PersistentMemoryAllocator::Reference ref =
+        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
+                                                     &found_id);
+
+    // Stop immediately if there are none.
+    if (!ref)
+      break;
+
+    // The sample-record could be for any sparse histogram. Add the reference
+    // to the appropriate collection for later use.
+    if (found_id == match_id) {
+      sample_map_records->records_.push_back(ref);
+      found = true;
+    } else {
+      PersistentSampleMapRecords* samples =
+          GetSampleMapRecordsWhileLocked(found_id);
+      DCHECK(samples);
+      samples->found_.push_back(ref);
+    }
+  }
+
+  return found;
+}
+
+
+PersistentSampleMapRecords::PersistentSampleMapRecords(
+    PersistentSparseHistogramDataManager* data_manager,
+    uint64_t sample_map_id)
+    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}
+
+PersistentSampleMapRecords::~PersistentSampleMapRecords() {}
+
+PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
+    const void* user) {
+  DCHECK(!user_);
+  user_ = user;
+  seen_ = 0;
+  return this;
+}
+
+void PersistentSampleMapRecords::Release(const void* user) {
+  DCHECK_EQ(user_, user);
+  user_ = nullptr;
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
+  DCHECK(user_);
+
+  // If there are no unseen records, lock and swap in all the found ones.
+  if (records_.size() == seen_) {
+    if (!data_manager_->LoadRecords(this))
+      return false;
+  }
+
+  // Return the next record. Records *must* be returned in the same order
+  // they are found in the persistent memory in order to ensure that all
+  // objects using this data always have the same state. Race conditions
+  // can cause duplicate records so using the "first found" is the only
+  // guarantee that all objects always access the same one.
+  DCHECK_LT(seen_, records_.size());
+  return records_[seen_++];
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
+    HistogramBase::Sample value) {
+  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
+                                                     sample_map_id_, value);
+}
+
+
+// This data will be held in persistent memory in order for processes to
+// locate and use histograms created elsewhere.
+struct PersistentHistogramAllocator::PersistentHistogramData {
+  int32_t histogram_type;
+  int32_t flags;
+  int32_t minimum;
+  int32_t maximum;
+  uint32_t bucket_count;
+  PersistentMemoryAllocator::Reference ranges_ref;
+  uint32_t ranges_checksum;
+  PersistentMemoryAllocator::Reference counts_ref;
+  HistogramSamples::Metadata samples_metadata;
+  HistogramSamples::Metadata logged_metadata;
+
+  // Space for the histogram name will be added during the actual allocation
+  // request. This must be the last field of the structure. A zero-size array
+  // or a "flexible" array would be preferred but is not (yet) valid C++.
+  char name[1];
+};
+
+PersistentHistogramAllocator::Iterator::Iterator(
+    PersistentHistogramAllocator* allocator)
+    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}
+
+std::unique_ptr<HistogramBase>
+PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
+    if (ref != ignore)
+      return allocator_->GetHistogram(ref);
+  }
+  return nullptr;
+}
+
+
+PersistentHistogramAllocator::PersistentHistogramAllocator(
+    std::unique_ptr<PersistentMemoryAllocator> memory)
+    : memory_allocator_(std::move(memory)),
+      sparse_histogram_data_manager_(memory_allocator_.get()) {}
+
+PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
+    Reference ref) {
+  // Unfortunately, the histogram "pickle" methods cannot be used as part of
+  // the persistence because the deserialization methods always create local
+  // count data (while these must reference the persistent counts) and always
+  // add it to the local list of known histograms (while these may be simple
+  // references to histograms in other processes).
+  PersistentHistogramData* histogram_data =
+      memory_allocator_->GetAsObject<PersistentHistogramData>(
+          ref, kTypeIdHistogram);
+  size_t length = memory_allocator_->GetAllocSize(ref);
+  if (!histogram_data ||
+      reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
+    NOTREACHED();
+    return nullptr;
+  }
+  return CreateHistogram(histogram_data);
+}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
+    HistogramType histogram_type,
+    const std::string& name,
+    int minimum,
+    int maximum,
+    const BucketRanges* bucket_ranges,
+    int32_t flags,
+    Reference* ref_ptr) {
+  // If the allocator is corrupt, don't waste time trying anything else.
+  // This also allows differentiating on the dashboard between allocations
+  // failed due to a corrupt allocator and the number of process instances
+  // with one, the latter being indicated by "newly corrupt", below.
+  if (memory_allocator_->IsCorrupt()) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
+    return nullptr;
+  }
+
+  // Create the metadata necessary for a persistent sparse histogram. This
+  // is done first because it is a small subset of what is required for
+  // other histograms.
+  PersistentMemoryAllocator::Reference histogram_ref =
+      memory_allocator_->Allocate(
+          offsetof(PersistentHistogramData, name) + name.length() + 1,
+          kTypeIdHistogram);
+  PersistentHistogramData* histogram_data =
+      memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
+                                                              kTypeIdHistogram);
+  if (histogram_data) {
+    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
+    histogram_data->histogram_type = histogram_type;
+    histogram_data->flags = flags | HistogramBase::kIsPersistent;
+  }
+
+  // Create the remaining metadata necessary for regular histograms.
+  if (histogram_type != SPARSE_HISTOGRAM) {
+    size_t bucket_count = bucket_ranges->bucket_count();
+    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
+    if (counts_bytes == 0) {
+      // |bucket_count| was out-of-range.
+      NOTREACHED();
+      return nullptr;
+    }
+
+    size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
+    PersistentMemoryAllocator::Reference counts_ref =
+        memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
+    PersistentMemoryAllocator::Reference ranges_ref =
+        memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
+    HistogramBase::Sample* ranges_data =
+        memory_allocator_->GetAsObject<HistogramBase::Sample>(
+            ranges_ref, kTypeIdRangesArray);
+
+    // Only continue here if all allocations were successful. If they weren't,
+    // there is no way to free the space but that's not really a problem since
+    // the allocations only fail because the space is full or corrupt and so
+    // any future attempts will also fail.
+    if (counts_ref && ranges_data && histogram_data) {
+      for (size_t i = 0; i < bucket_ranges->size(); ++i)
+        ranges_data[i] = bucket_ranges->range(i);
+
+      histogram_data->minimum = minimum;
+      histogram_data->maximum = maximum;
+      // |bucket_count| must fit within 32-bits or the allocation of the counts
+      // array would have failed for being too large; the allocator supports
+      // less than 4GB total size.
+      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
+      histogram_data->ranges_ref = ranges_ref;
+      histogram_data->ranges_checksum = bucket_ranges->checksum();
+      histogram_data->counts_ref = counts_ref;
+    } else {
+      histogram_data = nullptr;  // Clear this for proper handling below.
+    }
+  }
+
+  if (histogram_data) {
+    // Create the histogram using resources in persistent memory. This ends up
+    // resolving the "ref" values stored in histogram_data instead of just
+    // using what is already known above but avoids duplicating the switch
+    // statement here and serves as a double-check that everything is
+    // correct before committing the new histogram to persistent space.
+    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
+    DCHECK(histogram);
+    if (ref_ptr != nullptr)
+      *ref_ptr = histogram_ref;
+
+    // By storing the reference within the allocator to this histogram, the
+    // next import (which will happen before the next histogram creation)
+    // will know to skip it.
+    // See also the comment in ImportHistogramsToStatisticsRecorder().
+    subtle::NoBarrier_Store(&last_created_, histogram_ref);
+    return histogram;
+  }
+
+  CreateHistogramResultType result;
+  if (memory_allocator_->IsCorrupt()) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
+    result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
+  } else if (memory_allocator_->IsFull()) {
+    result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
+  } else {
+    result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
+  }
+  RecordCreateHistogramResult(result);
+  NOTREACHED() << "error=" << result;
+
+  return nullptr;
+}
+
+void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
+                                                     bool registered) {
+  // If the created persistent histogram was registered then it needs to
+  // be marked as "iterable" in order to be found by other processes.
+  if (registered)
+    memory_allocator_->MakeIterable(ref);
+  // If it wasn't registered then a race condition must have caused
+  // two to be created. The allocator does not support releasing the
+  // acquired memory so just change the type to be empty.
+  else
+    memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
+}
+
+void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
+    HistogramBase* histogram) {
+  DCHECK(histogram);
+
+  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+  if (!existing) {
+    // The above should never fail but if it does, no real harm is done.
+    // The data won't be merged but it also won't be recorded as merged
+    // so a future try, if successful, will get what was missed. If it
+    // continues to fail, some metric data will be lost but that is better
+    // than crashing.
+    NOTREACHED();
+    return;
+  }
+
+  // Merge the delta from the passed object to the one in the SR.
+  existing->AddSamples(*histogram->SnapshotDelta());
+}
+
+void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
+    const HistogramBase* histogram) {
+  DCHECK(histogram);
+
+  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
+  if (!existing) {
+    // The above should never fail but if it does, no real harm is done.
+    // Some metric data will be lost but that is better than crashing.
+    NOTREACHED();
+    return;
+  }
+
+  // Merge the delta from the passed object to the one in the SR.
+  existing->AddSamples(*histogram->SnapshotFinalDelta());
+}
+
+PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
+    uint64_t id,
+    const void* user) {
+  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
+}
+
+void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
+  memory_allocator_->CreateTrackingHistograms(name);
+}
+
+void PersistentHistogramAllocator::UpdateTrackingHistograms() {
+  memory_allocator_->UpdateTrackingHistograms();
+}
+
+void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
+  subtle::NoBarrier_Store(&last_created_, 0);
+}
+
+// static
+HistogramBase*
+PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
+  // Get the histogram in which create-results are stored. This is copied
+  // almost exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with
+  // added code to prevent recursion (a likely occurrence because the creation
+  // of a new histogram can end up calling this.)
+  static base::subtle::AtomicWord atomic_histogram_pointer = 0;
+  HistogramBase* histogram_pointer =
+      reinterpret_cast<HistogramBase*>(
+          base::subtle::Acquire_Load(&atomic_histogram_pointer));
+  if (!histogram_pointer) {
+    // It's possible for multiple threads to make it here in parallel but
+    // they'll always return the same result as there is a mutex in the Get.
+    // The purpose of the "initialized" variable is just to ensure that
+    // the same thread doesn't recurse which is also why it doesn't have
+    // to be atomic.
+    static bool initialized = false;
+    if (!initialized) {
+      initialized = true;
+      if (g_allocator) {
+// Don't log in release-with-asserts builds, otherwise the test_installer step
+// fails because this code writes to a log file before the installer code had a
+// chance to set the log file's location.
+#if !defined(DCHECK_ALWAYS_ON)
+        DLOG(WARNING) << "Creating the results-histogram inside persistent"
+                      << " memory can cause future allocations to crash if"
+                      << " that memory is ever released (for testing).";
+#endif
+      }
+
+      histogram_pointer = LinearHistogram::FactoryGet(
+          kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+      base::subtle::Release_Store(
+          &atomic_histogram_pointer,
+          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
+    }
+  }
+  return histogram_pointer;
+}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
+    PersistentHistogramData* histogram_data_ptr) {
+  if (!histogram_data_ptr) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  // Sparse histograms are quite different so handle them as a special case.
+  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
+    std::unique_ptr<HistogramBase> histogram =
+        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
+                                          &histogram_data_ptr->samples_metadata,
+                                          &histogram_data_ptr->logged_metadata);
+    DCHECK(histogram);
+    histogram->SetFlags(histogram_data_ptr->flags);
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
+    return histogram;
+  }
+
+  // Copy the histogram_data to local storage because anything in persistent
+  // memory cannot be trusted as it could be changed at any moment by a
+  // malicious actor that shares access. The contents of histogram_data are
+  // validated below; the local copy is to ensure that the contents cannot
+  // be externally changed between validation and use.
+  PersistentHistogramData histogram_data = *histogram_data_ptr;
+
+  HistogramBase::Sample* ranges_data =
+      memory_allocator_->GetAsObject<HistogramBase::Sample>(
+          histogram_data.ranges_ref, kTypeIdRangesArray);
+
+  const uint32_t max_buckets =
+      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
+  size_t required_bytes =
+      (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample);
+  size_t allocated_bytes =
+      memory_allocator_->GetAllocSize(histogram_data.ranges_ref);
+  if (!ranges_data || histogram_data.bucket_count < 2 ||
+      histogram_data.bucket_count >= max_buckets ||
+      allocated_bytes < required_bytes) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  std::unique_ptr<const BucketRanges> created_ranges =
+      CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
+                           histogram_data.bucket_count + 1);
+  if (!created_ranges) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+  const BucketRanges* ranges =
+      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
+          created_ranges.release());
+
+  HistogramBase::AtomicCount* counts_data =
+      memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
+          histogram_data.counts_ref, kTypeIdCountsArray);
+  size_t counts_bytes =
+      CalculateRequiredCountsBytes(histogram_data.bucket_count);
+  if (!counts_data || counts_bytes == 0 ||
+      memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
+          counts_bytes) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  // After the main "counts" array is a second array used for storing what
+  // was previously logged. This is used to calculate the "delta" during
+  // snapshot operations.
+  HistogramBase::AtomicCount* logged_data =
+      counts_data + histogram_data.bucket_count;
+
+  std::string name(histogram_data_ptr->name);
+  std::unique_ptr<HistogramBase> histogram;
+  switch (histogram_data.histogram_type) {
+    case HISTOGRAM:
+      histogram = Histogram::PersistentCreate(
+          name, histogram_data.minimum, histogram_data.maximum, ranges,
+          counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case LINEAR_HISTOGRAM:
+      histogram = LinearHistogram::PersistentCreate(
+          name, histogram_data.minimum, histogram_data.maximum, ranges,
+          counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case BOOLEAN_HISTOGRAM:
+      histogram = BooleanHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case CUSTOM_HISTOGRAM:
+      histogram = CustomHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    default:
+      NOTREACHED();
+  }
+
+  if (histogram) {
+    DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
+    histogram->SetFlags(histogram_data.flags);
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
+  } else {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
+  }
+
+  return histogram;
+}
+
+HistogramBase*
+PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
+    const HistogramBase* histogram) {
+  // This should never be called on the global histogram allocator as objects
+  // created there are already within the global statistics recorder.
+  DCHECK_NE(g_allocator, this);
+  DCHECK(histogram);
+
+  HistogramBase* existing =
+      StatisticsRecorder::FindHistogram(histogram->histogram_name());
+  if (existing)
+    return existing;
+
+  // Adding the passed histogram to the SR would cause a problem if the
+  // allocator that holds it eventually goes away. Instead, create a new
+  // one from a serialized version.
+  base::Pickle pickle;
+  if (!histogram->SerializeInfo(&pickle))
+    return nullptr;
+  PickleIterator iter(pickle);
+  existing = DeserializeHistogramInfo(&iter);
+  if (!existing)
+    return nullptr;
+
+  // Make sure there is no "serialization" flag set.
+  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
+  // Record the newly created histogram in the SR.
+  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
+}
+
+// static
+void PersistentHistogramAllocator::RecordCreateHistogramResult(
+    CreateHistogramResultType result) {
+  HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
+  if (result_histogram)
+    result_histogram->Add(result);
+}
+
+GlobalHistogramAllocator::~GlobalHistogramAllocator() {}
+
+// static
+void GlobalHistogramAllocator::CreateWithPersistentMemory(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    StringPiece name) {
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new PersistentMemoryAllocator(
+          base, size, page_size, id, name, false)))));
+}
+
+// static
+void GlobalHistogramAllocator::CreateWithLocalMemory(
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)))));
+}
+
+#if !defined(OS_NACL)
+// static
+void GlobalHistogramAllocator::CreateWithFile(
+    const FilePath& file_path,
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  bool exists = PathExists(file_path);
+  File file(
+      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
+                 File::FLAG_READ | File::FLAG_WRITE);
+
+  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+  if (exists) {
+    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
+  } else {
+    mmfile->Initialize(std::move(file), {0, static_cast<int64_t>(size)},
+                       MemoryMappedFile::READ_WRITE_EXTEND);
+  }
+  if (!mmfile->IsValid() ||
+      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
+    NOTREACHED();
+    return;
+  }
+
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new FilePersistentMemoryAllocator(
+          std::move(mmfile), size, id, name, false)))));
+}
+#endif
+
+// static
+void GlobalHistogramAllocator::CreateWithSharedMemory(
+    std::unique_ptr<SharedMemory> memory,
+    size_t size,
+    uint64_t /*id*/,
+    StringPiece /*name*/) {
+  if ((!memory->memory() && !memory->Map(size)) ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*memory)) {
+    NOTREACHED();
+    return;
+  }
+
+  DCHECK_LE(memory->mapped_size(), size);
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new SharedPersistentMemoryAllocator(
+          std::move(memory), 0, StringPiece(), /*readonly=*/false)))));
+}
+
+// static
+void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t size) {
+  std::unique_ptr<SharedMemory> shm(
+      new SharedMemory(handle, /*readonly=*/false));
+  if (!shm->Map(size) ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
+    NOTREACHED();
+    return;
+  }
+
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new SharedPersistentMemoryAllocator(
+          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
+}
+
+// static
+void GlobalHistogramAllocator::Set(
+    std::unique_ptr<GlobalHistogramAllocator> allocator) {
+  // Releasing or changing an allocator is extremely dangerous because it
+  // likely has histograms stored within it. If the backing memory is also
+  // released, future accesses to those histograms will seg-fault.
+  CHECK(!g_allocator);
+  g_allocator = allocator.release();
+  size_t existing = StatisticsRecorder::GetHistogramCount();
+
+  DVLOG_IF(1, existing)
+      << existing << " histograms were created before persistence was enabled.";
+}
+
+// static
+GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
+  return g_allocator;
+}
+
+// static
+std::unique_ptr<GlobalHistogramAllocator>
+GlobalHistogramAllocator::ReleaseForTesting() {
+  GlobalHistogramAllocator* histogram_allocator = g_allocator;
+  if (!histogram_allocator)
+    return nullptr;
+  PersistentMemoryAllocator* memory_allocator =
+      histogram_allocator->memory_allocator();
+
+  // Before releasing the memory, it's necessary to have the Statistics-
+  // Recorder forget about the histograms contained therein; otherwise,
+  // some operations will try to access them and the released memory.
+  PersistentMemoryAllocator::Iterator iter(memory_allocator);
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) {
+    PersistentHistogramData* histogram_data =
+        memory_allocator->GetAsObject<PersistentHistogramData>(
+            ref, kTypeIdHistogram);
+    DCHECK(histogram_data);
+    StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
+
+    // If a test breaks here then a memory region containing a histogram
+    // actively used by this code is being released back to the test.
+    // If that memory segment were to be deleted, future calls to create
+    // persistent histograms would crash. To avoid this, have the test call
+    // the method GetCreateHistogramResultHistogram() *before* setting
+    // the (temporary) memory allocator via SetGlobalAllocator() so that
+    // histogram is instead allocated from the process heap.
+    DCHECK_NE(kResultHistogram, histogram_data->name);
+  }
+
+  g_allocator = nullptr;
+  return WrapUnique(histogram_allocator);
+}
+
+void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
+  persistent_location_ = location;
+}
+
+const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
+  return persistent_location_;
+}
+
+bool GlobalHistogramAllocator::WriteToPersistentLocation() {
+#if defined(OS_NACL)
+  // NACL doesn't support file operations, including ImportantFileWriter.
+  NOTREACHED();
+  return false;
+#else
+  // Stop if no destination is set.
+  if (persistent_location_.empty()) {
+    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
+                 << " to file because no location was set.";
+    return false;
+  }
+
+  StringPiece contents(static_cast<const char*>(data()), used());
+  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
+                                                contents)) {
+    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
+               << " to file: " << persistent_location_.value();
+    return false;
+  }
+
+  return true;
+#endif
+}
+
+GlobalHistogramAllocator::GlobalHistogramAllocator(
+    std::unique_ptr<PersistentMemoryAllocator> memory)
+    : PersistentHistogramAllocator(std::move(memory)),
+      import_iterator_(this) {}
+
+void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
+  // Skip the import if it's the histogram that was last created. Should a
+  // race condition cause the "last created" to be overwritten before it
+  // is recognized here then the histogram will be created and be ignored
+  // when it is detected as a duplicate by the statistics-recorder. This
+  // simple check reduces the time of creating persistent histograms by
+  // about 40%.
+  Reference record_to_ignore = last_created();
+
+  // There is no lock on this because the iterator is lock-free while still
+  // guaranteed to only return each entry only once. The StatisticsRecorder
+  // has its own lock so the Register operation is safe.
+  while (true) {
+    std::unique_ptr<HistogramBase> histogram =
+        import_iterator_.GetNextWithIgnore(record_to_ignore);
+    if (!histogram)
+      break;
+    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
new file mode 100644
index 0000000..ee1fba5
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -0,0 +1,479 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
+#define BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
+
+#include <map>
+#include <memory>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class FilePath;
+class PersistentSampleMapRecords;
+class PersistentSparseHistogramDataManager;
+
+// Feature definition for enabling histogram persistence.
+BASE_EXPORT extern const Feature kPersistentHistogramsFeature;
+
+
+// A data manager for sparse histograms so each instance of such doesn't have
+// to separately iterate over the entire memory segment. Though this class
+// will generally be accessed through the PersistentHistogramAllocator above,
+// it can be used independently on any PersistentMemoryAllocator (making it
+// useable for testing). This object supports only one instance of a sparse
+// histogram for a given id. Tests that create multiple identical histograms,
+// perhaps to simulate multiple processes, should create a separate manager
+// for each.
+class BASE_EXPORT PersistentSparseHistogramDataManager {
+ public:
+  // Constructs the data manager. The allocator must live longer than any
+  // managers that reference it.
+  explicit PersistentSparseHistogramDataManager(
+      PersistentMemoryAllocator* allocator);
+
+  ~PersistentSparseHistogramDataManager();
+
+  // Returns the object that manages the persistent-sample-map records for a
+  // given |id|. Only one |user| of this data is allowed at a time. This does
+  // an automatic Acquire() on the records. The user must call Release() on
+  // the returned object when it is finished with it. Ownership of the records
+  // object stays with this manager.
+  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
+                                                  const void* user);
+
+  // Convenience method that gets the object for a given reference so callers
+  // don't have to also keep their own pointer to the appropriate allocator.
+  template <typename T>
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
+    return allocator_->GetAsObject<T>(ref, type_id);
+  }
+
+ private:
+  friend class PersistentSampleMapRecords;
+
+  // Gets the object holding records for a given sample-map id when |lock_|
+  // has already been acquired.
+  PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id);
+
+  // Loads sample-map records looking for those belonging to the specified
+  // |load_id|. Records found for other sample-maps are held for later use
+  // without having to iterate again. This should be called only from a
+  // PersistentSampleMapRecords object because those objects have a contract
+  // that there are no other threads accessing the internal records_ field
+  // of the object that is passed in.
+  bool LoadRecords(PersistentSampleMapRecords* sample_map_records);
+
+  // Weak-pointer to the allocator used by the sparse histograms.
+  PersistentMemoryAllocator* allocator_;
+
+  // Iterator within the allocator for finding sample records.
+  PersistentMemoryAllocator::Iterator record_iterator_;
+
+  // Mapping of sample-map IDs to their sample records.
+  std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
+      sample_records_;
+
+  // A lock used for synchronizing changes to sample_records_.
+  base::Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSparseHistogramDataManager);
+};
+
+
+// This class manages sample-records used by a PersistentSampleMap container
+// that underlies a persistent SparseHistogram object. It is broken out into a
+// top-level class so that it can be forward-declared in other header files
+// rather than include this entire file as would be necessary if it were
+// declared within the PersistentSparseHistogramDataManager class above.
+class BASE_EXPORT PersistentSampleMapRecords {
+ public:
+  // Constructs an instance of this class. The manager object must live longer
+  // than all instances of this class that reference it, which is not usually
+  // a problem since these objects are generally managed from within that
+  // manager instance.
+  PersistentSampleMapRecords(PersistentSparseHistogramDataManager* data_manager,
+                             uint64_t sample_map_id);
+
+  ~PersistentSampleMapRecords();
+
+  // Resets the internal state for a new object using this data. The return
+  // value is "this" as a convenience.
+  PersistentSampleMapRecords* Acquire(const void* user);
+
+  // Indicates that the using object is done with this data.
+  void Release(const void* user);
+
+  // Gets the next reference to a persistent sample-map record. The type and
+  // layout of the data being referenced is defined entirely within the
+  // PersistentSampleMap class.
+  PersistentMemoryAllocator::Reference GetNext();
+
+  // Creates a new persistent sample-map record for sample |value| and returns
+  // a reference to it.
+  PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value);
+
+  // Convenience method that gets the object for a given reference so callers
+  // don't have to also keep their own pointer to the appropriate allocator.
+  // This is expected to be used with the SampleRecord structure defined inside
+  // the persistent_sample_map.cc file but since that isn't exported (for
+  // cleanliness of the interface), a template is defined that will be
+  // resolved when used inside that file.
+  template <typename T>
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
+    return data_manager_->GetAsObject<T>(ref, type_id);
+  }
+
+ private:
+  friend PersistentSparseHistogramDataManager;
+
+  // Weak-pointer to the parent data-manager object.
+  PersistentSparseHistogramDataManager* data_manager_;
+
+  // ID of PersistentSampleMap to which these records apply.
+  const uint64_t sample_map_id_;
+
+  // The current user of this set of records. It is used to ensure that no
+  // more than one object is using these records at a given time.
+  const void* user_ = nullptr;
+
+  // This is the count of how many "records" have already been read by the
+  // owning sample-map.
+  size_t seen_ = 0;
+
+  // This is the set of records previously found for a sample map. Because
+  // there is ever only one object with a given ID (typically a hash of a
+  // histogram name) and because the parent SparseHistogram has acquired
+  // its own lock before accessing the PersistentSampleMap it controls, this
+  // list can be accessed without acquiring any additional lock.
+  std::vector<PersistentMemoryAllocator::Reference> records_;
+
+  // This is the set of records found during iteration through memory. It
+  // is appended in bulk to "records". Access to this vector can be done
+  // only while holding the parent manager's lock.
+  std::vector<PersistentMemoryAllocator::Reference> found_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMapRecords);
+};
+
+
+// This class manages histograms created within a PersistentMemoryAllocator.
+class BASE_EXPORT PersistentHistogramAllocator {
+ public:
+  // A reference to a histogram. While this is implemented as PMA::Reference,
+  // it is not conceptually the same thing. Outside callers should always use
+  // a Reference matching the class it is for and not mix the two.
+  using Reference = PersistentMemoryAllocator::Reference;
+
+  // Iterator used for fetching persistent histograms from an allocator.
+  // It is lock-free and thread-safe.
+  // See PersistentMemoryAllocator::Iterator for more information.
+  class BASE_EXPORT Iterator {
+   public:
+    // Constructs an iterator on a given |allocator|, starting at the beginning.
+    // The allocator must live beyond the lifetime of the iterator.
+    explicit Iterator(PersistentHistogramAllocator* allocator);
+
+    // Gets the next histogram from persistent memory; returns null if there
+    // are no more histograms to be found. This may still be called again
+    // later to retrieve any new histograms added in the meantime.
+    std::unique_ptr<HistogramBase> GetNext() { return GetNextWithIgnore(0); }
+
+    // Gets the next histogram from persistent memory, ignoring one particular
+    // reference in the process. Pass |ignore| of zero (0) to ignore nothing.
+    std::unique_ptr<HistogramBase> GetNextWithIgnore(Reference ignore);
+
+   private:
+    // Weak-pointer to histogram allocator being iterated over.
+    PersistentHistogramAllocator* allocator_;
+
+    // The iterator used for stepping through objects in persistent memory.
+    // It is lock-free and thread-safe which is why this class is also such.
+    PersistentMemoryAllocator::Iterator memory_iter_;
+
+    DISALLOW_COPY_AND_ASSIGN(Iterator);
+  };
+
+  // A PersistentHistogramAllocator is constructed from a PersistentMemory-
+  // Allocator object of which it takes ownership.
+  explicit PersistentHistogramAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> memory);
+  virtual ~PersistentHistogramAllocator();
+
+  // Direct access to underlying memory allocator. If the segment is shared
+  // across threads or processes, reading data through these values does
+  // not guarantee consistency. Use with care. Do not write.
+  PersistentMemoryAllocator* memory_allocator() {
+    return memory_allocator_.get();
+  }
+
+  // Implement the "metadata" API of a PersistentMemoryAllocator, forwarding
+  // those requests to the real one.
+  uint64_t Id() const { return memory_allocator_->Id(); }
+  const char* Name() const { return memory_allocator_->Name(); }
+  const void* data() const { return memory_allocator_->data(); }
+  size_t length() const { return memory_allocator_->length(); }
+  size_t size() const { return memory_allocator_->size(); }
+  size_t used() const { return memory_allocator_->used(); }
+
+  // Recreate a Histogram from data held in persistent memory. Though this
+  // object will be local to the current process, the sample data will be
+  // shared with all other threads referencing it. This method takes a |ref|
+  // to where the top-level histogram data may be found in this allocator.
+  // This method will return null if any problem is detected with the data.
+  std::unique_ptr<HistogramBase> GetHistogram(Reference ref);
+
+  // Allocate a new persistent histogram. The returned histogram will not
+  // be able to be located by other allocators until it is "finalized".
+  std::unique_ptr<HistogramBase> AllocateHistogram(
+      HistogramType histogram_type,
+      const std::string& name,
+      int minimum,
+      int maximum,
+      const BucketRanges* bucket_ranges,
+      int32_t flags,
+      Reference* ref_ptr);
+
+  // Finalize the creation of the histogram, making it available to other
+  // processes if |registered| (as in: added to the StatisticsRecorder) is
+  // true, forgetting it otherwise.
+  void FinalizeHistogram(Reference ref, bool registered);
+
+  // Merges the data in a persistent histogram with one held globally by the
+  // StatisticsRecorder, updating the "logged" samples within the passed
+  // object so that repeated merges are allowed. Don't call this on a "global"
+  // allocator because histograms created there will already be in the SR.
+  void MergeHistogramDeltaToStatisticsRecorder(HistogramBase* histogram);
+
+  // As above but merge the "final" delta. No update of "logged" samples is
+  // done which means it can operate on read-only objects. It's essential,
+  // however, not to call this more than once or those final samples will
+  // get recorded again.
+  void MergeHistogramFinalDeltaToStatisticsRecorder(
+      const HistogramBase* histogram);
+
+  // Returns the object that manages the persistent-sample-map records for a
+  // given |id|. Only one |user| of this data is allowed at a time. This does
+  // an automatic Acquire() on the records. The user must call Release() on
+  // the returned object when it is finished with it. Ownership stays with
+  // this allocator.
+  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
+                                                  const void* user);
+
+  // Create internal histograms for tracking memory use and allocation sizes
+  // for allocator of |name| (which can simply be the result of Name()). This
+  // is done separately from construction for situations such as when the
+  // histograms will be backed by memory provided by this very allocator.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histograms:
+  //    UMA.PersistentAllocator.name.Allocs
+  //    UMA.PersistentAllocator.name.UsedPct
+  void CreateTrackingHistograms(StringPiece name);
+  void UpdateTrackingHistograms();
+
+  // Clears the internal |last_created_| reference so testing can validate
+  // operation without that optimization.
+  void ClearLastCreatedReferenceForTesting();
+
+  // Histogram containing creation results. Visible for testing.
+  static HistogramBase* GetCreateHistogramResultHistogram();
+
+ protected:
+  // The structure used to hold histogram data in persistent memory. It is
+  // defined and used entirely within the .cc file.
+  struct PersistentHistogramData;
+
+  // Gets the reference of the last histogram created, used to avoid
+  // trying to import what was just created.
+  PersistentHistogramAllocator::Reference last_created() {
+    return subtle::NoBarrier_Load(&last_created_);
+  }
+
+  // Gets the next histogram in persistent data based on iterator while
+  // ignoring a particular reference if it is found.
+  std::unique_ptr<HistogramBase> GetNextHistogramWithIgnore(Iterator* iter,
+                                                            Reference ignore);
+
+ private:
+  // Enumerate possible creation results for reporting.
+  enum CreateHistogramResultType {
+    // Everything was fine.
+    CREATE_HISTOGRAM_SUCCESS = 0,
+
+    // Pointer to metadata was not valid.
+    CREATE_HISTOGRAM_INVALID_METADATA_POINTER,
+
+    // Histogram metadata was not valid.
+    CREATE_HISTOGRAM_INVALID_METADATA,
+
+    // Ranges information was not valid.
+    CREATE_HISTOGRAM_INVALID_RANGES_ARRAY,
+
+    // Counts information was not valid.
+    CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY,
+
+    // Could not allocate histogram memory due to corruption.
+    CREATE_HISTOGRAM_ALLOCATOR_CORRUPT,
+
+    // Could not allocate histogram memory due to lack of space.
+    CREATE_HISTOGRAM_ALLOCATOR_FULL,
+
+    // Could not allocate histogram memory due to unknown error.
+    CREATE_HISTOGRAM_ALLOCATOR_ERROR,
+
+    // Histogram was of unknown type.
+    CREATE_HISTOGRAM_UNKNOWN_TYPE,
+
+    // Instance has detected a corrupt allocator (recorded only once).
+    CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT,
+
+    // Always keep this at the end.
+    CREATE_HISTOGRAM_MAX
+  };
+
+  // Create a histogram based on saved (persistent) information about it.
+  std::unique_ptr<HistogramBase> CreateHistogram(
+      PersistentHistogramData* histogram_data_ptr);
+
+  // Gets or creates an object in the global StatisticsRecorder matching
+  // the |histogram| passed. Null is returned if one was not found and
+  // one could not be created.
+  HistogramBase* GetOrCreateStatisticsRecorderHistogram(
+      const HistogramBase* histogram);
+
+  // Record the result of a histogram creation.
+  static void RecordCreateHistogramResult(CreateHistogramResultType result);
+
+  // The memory allocator that provides the actual histogram storage.
+  std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
+
+  // The data-manager used to improve performance of sparse histograms.
+  PersistentSparseHistogramDataManager sparse_histogram_data_manager_;
+
+  // A reference to the last-created histogram in the allocator, used to avoid
+  // trying to import what was just created.
+  // TODO(bcwhite): Change this to std::atomic<PMA::Reference> when available.
+  subtle::Atomic32 last_created_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocator);
+};
+
+
+// A special case of the PersistentHistogramAllocator that operates on a
+// global scale, collecting histograms created through standard macros and
+// the FactoryGet() method.
+class BASE_EXPORT GlobalHistogramAllocator
+    : public PersistentHistogramAllocator {
+ public:
+  ~GlobalHistogramAllocator() override;
+
+  // Create a global allocator using the passed-in memory |base|, |size|, and
+  // other parameters. Ownership of the memory segment remains with the caller.
+  static void CreateWithPersistentMemory(void* base,
+                                         size_t size,
+                                         size_t page_size,
+                                         uint64_t id,
+                                         StringPiece name);
+
+  // Create a global allocator using an internal block of memory of the
+  // specified |size| taken from the heap.
+  static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
+
+#if !defined(OS_NACL)
+  // Create a global allocator by memory-mapping a |file|. If the file does
+  // not exist, it will be created with the specified |size|. If the file does
+  // exist, the allocator will use and add to its contents, ignoring the passed
+  // size in favor of the existing size.
+  static void CreateWithFile(const FilePath& file_path,
+                             size_t size,
+                             uint64_t id,
+                             StringPiece name);
+#endif
+
+  // Create a global allocator using a block of shared |memory| of the
+  // specified |size|. The allocator takes ownership of the shared memory
+  // and releases it upon destruction, though the memory will continue to
+  // live if other processes have access to it.
+  static void CreateWithSharedMemory(std::unique_ptr<SharedMemory> memory,
+                                     size_t size,
+                                     uint64_t id,
+                                     StringPiece name);
+
+  // Create a global allocator using a block of shared memory accessed
+  // through the given |handle| and |size|. The allocator takes ownership
+  // of the handle and closes it upon destruction, though the memory will
+  // continue to live if other processes have access to it.
+  static void CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
+                                           size_t size);
+
+  // Sets a GlobalHistogramAllocator for globally storing histograms in
+  // a space that can be persisted or shared between processes. There is only
+  // ever one allocator for all such histograms created by a single process.
+  // This takes ownership of the object and should be called as soon as
+  // possible during startup to capture as many histograms as possible and
+  // while operating single-threaded so there are no race-conditions.
+  static void Set(std::unique_ptr<GlobalHistogramAllocator> allocator);
+
+  // Gets a pointer to the global histogram allocator. Returns null if none
+  // exists.
+  static GlobalHistogramAllocator* Get();
+
+  // This access to the persistent allocator is only for testing; it extracts
+  // the current allocator completely. This allows easy creation of histograms
+  // within persistent memory segments which can then be extracted and used in
+  // other ways.
+  static std::unique_ptr<GlobalHistogramAllocator> ReleaseForTesting();
+
+  // Stores a pathname to which the contents of this allocator should be saved
+  // in order to persist the data for a later use.
+  void SetPersistentLocation(const FilePath& location);
+
+  // Retrieves a previously set pathname to which the contents of this allocator
+  // are to be saved.
+  const FilePath& GetPersistentLocation() const;
+
+  // Writes the internal data to a previously set location. This is generally
+  // called when a process is exiting from a section of code that may not know
+  // the filesystem. The data is written in an atomic manner. The return value
+  // indicates success.
+  bool WriteToPersistentLocation();
+
+ private:
+  friend class StatisticsRecorder;
+
+  // Creates a new global histogram allocator.
+  explicit GlobalHistogramAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> memory);
+
+  // Import new histograms from the global histogram allocator. It's possible
+  // for other processes to create histograms in the active memory segment;
+  // this adds those to the internal list of known histograms to avoid creating
+  // duplicates that would have to be merged during reporting. Every call to
+  // this method resumes from the last entry it saw; it costs nothing if
+  // nothing new has been added.
+  void ImportHistogramsToStatisticsRecorder();
+
+  // Import always continues from where it left off, making use of a single
+  // iterator to continue the work.
+  Iterator import_iterator_;
+
+  // The location to which the data should be persisted.
+  FilePath persistent_location_;
+
+  DISALLOW_COPY_AND_ASSIGN(GlobalHistogramAllocator);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
new file mode 100644
index 0000000..b680662
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -0,0 +1,209 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_allocator.h"
+
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/statistics_recorder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class PersistentHistogramAllocatorTest : public testing::Test {
+ protected:
+  const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
+
+  PersistentHistogramAllocatorTest()
+      : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()) {
+    CreatePersistentHistogramAllocator();
+  }
+  ~PersistentHistogramAllocatorTest() override {
+    DestroyPersistentHistogramAllocator();
+  }
+
+  void CreatePersistentHistogramAllocator() {
+    allocator_memory_.reset(new char[kAllocatorMemorySize]);
+
+    GlobalHistogramAllocator::ReleaseForTesting();
+    memset(allocator_memory_.get(), 0, kAllocatorMemorySize);
+    GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        allocator_memory_.get(), kAllocatorMemorySize, 0, 0,
+        "PersistentHistogramAllocatorTest");
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
+  }
+
+  void DestroyPersistentHistogramAllocator() {
+    allocator_ = nullptr;
+    GlobalHistogramAllocator::ReleaseForTesting();
+  }
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<char[]> allocator_memory_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocatorTest);
+};
+
+TEST_F(PersistentHistogramAllocatorTest, CreateAndIterateTest) {
+  PersistentMemoryAllocator::MemoryInfo meminfo0;
+  allocator_->GetMemoryInfo(&meminfo0);
+
+  // Try basic construction
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(histogram);
+  histogram->CheckName("TestHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  allocator_->GetMemoryInfo(&meminfo1);
+  EXPECT_GT(meminfo0.free, meminfo1.free);
+
+  HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+      "TestLinearHistogram", 1, 1000, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(linear_histogram);
+  linear_histogram->CheckName("TestLinearHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  allocator_->GetMemoryInfo(&meminfo2);
+  EXPECT_GT(meminfo1.free, meminfo2.free);
+
+  HistogramBase* boolean_histogram = BooleanHistogram::FactoryGet(
+      "TestBooleanHistogram", HistogramBase::kIsPersistent);
+  EXPECT_TRUE(boolean_histogram);
+  boolean_histogram->CheckName("TestBooleanHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo3;
+  allocator_->GetMemoryInfo(&meminfo3);
+  EXPECT_GT(meminfo2.free, meminfo3.free);
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(custom_histogram);
+  custom_histogram->CheckName("TestCustomHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo4;
+  allocator_->GetMemoryInfo(&meminfo4);
+  EXPECT_GT(meminfo3.free, meminfo4.free);
+
+  PersistentMemoryAllocator::Iterator iter(allocator_);
+  uint32_t type;
+  EXPECT_NE(0U, iter.GetNext(&type));  // Histogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // LinearHistogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // BooleanHistogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // CustomHistogram
+  EXPECT_EQ(0U, iter.GetNext(&type));
+
+  // Create a second allocator and have it access the memory of the first.
+  std::unique_ptr<HistogramBase> recovered;
+  PersistentHistogramAllocator recovery(
+      WrapUnique(new PersistentMemoryAllocator(
+          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+  PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestLinearHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestBooleanHistogram");
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+  recovered->CheckName("TestCustomHistogram");
+
+  recovered = histogram_iter.GetNext();
+  EXPECT_FALSE(recovered);
+}
+
+TEST_F(PersistentHistogramAllocatorTest, CreateWithFileTest) {
+  const char temp_name[] = "CreateWithFileTest";
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath temp_file = temp_dir.path().AppendASCII(temp_name);
+  const size_t temp_size = 64 << 10;  // 64 KiB
+
+  // Test creation of a new file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, temp_name);
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Test re-open of a possibly-existing file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, "");
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Test re-open of a known-existing file.
+  GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, 0, 0, "");
+  EXPECT_EQ(std::string(temp_name),
+            GlobalHistogramAllocator::Get()->memory_allocator()->Name());
+
+  // Final release so file and temp-dir can be removed.
+  GlobalHistogramAllocator::ReleaseForTesting();
+}
+
+TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderTest) {
+  size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
+
+  // Create a local StatisticsRecorder in which the newly created histogram
+  // will be recorded.
+  std::unique_ptr<StatisticsRecorder> local_sr =
+      StatisticsRecorder::CreateTemporaryForTesting();
+  EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
+
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      "TestHistogram", 1, 10, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(histogram);
+  EXPECT_EQ(1U, StatisticsRecorder::GetHistogramCount());
+  histogram->Add(3);
+  histogram->Add(1);
+  histogram->Add(4);
+  histogram->Add(1);
+  histogram->Add(6);
+
+  // Destroy the local SR and ensure that we're back to the initial state.
+  local_sr.reset();
+  EXPECT_EQ(starting_sr_count, StatisticsRecorder::GetHistogramCount());
+
+  // Create a second allocator and have it access the memory of the first.
+  std::unique_ptr<HistogramBase> recovered;
+  PersistentHistogramAllocator recovery(
+      WrapUnique(new PersistentMemoryAllocator(
+          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+  PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
+
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
+
+  // Merge the recovered histogram to the SR. It will always be a new object.
+  recovery.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
+  EXPECT_EQ(starting_sr_count + 1, StatisticsRecorder::GetHistogramCount());
+  HistogramBase* found =
+      StatisticsRecorder::FindHistogram(recovered->histogram_name());
+  ASSERT_TRUE(found);
+  EXPECT_NE(recovered.get(), found);
+
+  // Ensure that the data got merged, too.
+  std::unique_ptr<HistogramSamples> snapshot = found->SnapshotSamples();
+  EXPECT_EQ(recovered->SnapshotSamples()->TotalCount(), snapshot->TotalCount());
+  EXPECT_EQ(1, snapshot->GetCount(3));
+  EXPECT_EQ(2, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(4));
+  EXPECT_EQ(1, snapshot->GetCount(6));
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
new file mode 100644
index 0000000..dfa408f
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -0,0 +1,830 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_memory_allocator.h"
+
+#include <assert.h>
+#include <algorithm>
+
+#if defined(OS_WIN)
+#include "winbase.h"
+#elif defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_macros.h"
+
+namespace {
+
+// Limit of memory segment size. It has to fit in an unsigned 32-bit number
+// and should be a power of 2 in order to accommodate almost any page size.
+const uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
+
+// A constant (random) value placed in the shared metadata to identify
+// an already initialized memory segment.
+const uint32_t kGlobalCookie = 0x408305DC;
+
+// The current version of the metadata. If updates are made that change
+// the metadata, the version number can be queried to operate in a backward-
+// compatible manner until the memory segment is completely re-initialized.
+const uint32_t kGlobalVersion = 1;
+
+// Constant values placed in the block headers to indicate its state.
+const uint32_t kBlockCookieFree = 0;
+const uint32_t kBlockCookieQueue = 1;
+const uint32_t kBlockCookieWasted = (uint32_t)-1;
+const uint32_t kBlockCookieAllocated = 0xC8799269;
+
+// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
+// types rather than combined bitfield.
+
+// Flags stored in the flags_ field of the SharedMetaData structure below.
+enum : int {
+  kFlagCorrupt = 1 << 0,
+  kFlagFull    = 1 << 1
+};
+
+bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
+  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
+  return (loaded_flags & flag) != 0;
+}
+
+void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
+  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
+  for (;;) {
+    uint32_t new_flags = (loaded_flags & ~flag) | flag;
+    // In the failure case, the actual "flags" value is stored in loaded_flags.
+    if (flags->compare_exchange_weak(loaded_flags, new_flags))
+      break;
+  }
+}
+
+}  // namespace
+
+namespace base {
+
+// All allocations and data-structures must be aligned to this byte boundary.
+// Alignment as large as the physical bus between CPU and RAM is _required_
+// for some architectures, is simply more efficient on other CPUs, and
+// generally a Good Idea(tm) for all platforms as it reduces/eliminates the
+// chance that a type will span cache lines. Alignment mustn't be less
+// than 8 to ensure proper alignment for all types. The rest is a balance
+// between reducing spans across multiple cache lines and wasted space spent
+// padding out allocations. An alignment of 16 would ensure that the block
+// header structure always sits in a single cache line. An average of about
+// 1/2 this value will be wasted with every allocation.
+const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
+
+// The block-header is placed at the top of every allocation within the
+// segment to describe the data that follows it.
+struct PersistentMemoryAllocator::BlockHeader {
+  uint32_t size;       // Number of bytes in this block, including header.
+  uint32_t cookie;     // Constant value indicating completed allocation.
+  std::atomic<uint32_t> type_id;  // Arbitrary number indicating data type.
+  std::atomic<uint32_t> next;     // Pointer to the next block when iterating.
+};
+
+// The shared metadata exists once at the top of the memory segment to
+// describe the state of the allocator to all processes.
+struct PersistentMemoryAllocator::SharedMetadata {
+  uint32_t cookie;     // Some value that indicates complete initialization.
+  uint32_t size;       // Total size of memory segment.
+  uint32_t page_size;  // Paging size within memory segment.
+  uint32_t version;    // Version code so upgrades don't break.
+  uint64_t id;         // Arbitrary ID number given by creator.
+  uint32_t name;       // Reference to stored name string.
+
+  // Above is read-only after first construction. Below may be changed and
+  // so must be marked "volatile" to provide correct inter-process behavior.
+
+  // Bitfield of information flags. Access to this should be done through
+  // the CheckFlag() and SetFlag() methods defined above.
+  volatile std::atomic<uint32_t> flags;
+
+  // Offset/reference to first free space in segment.
+  volatile std::atomic<uint32_t> freeptr;
+
+  // The "iterable" queue is an M&S Queue as described here, append-only:
+  // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+  volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
+  volatile BlockHeader queue;   // Empty block for linked-list head/tail.
+};
+
+// The "queue" block header is used to detect "last node" so that zero/null
+// can be used to indicate that it hasn't been added at all. It is part of
+// the SharedMetadata structure which itself is always located at offset zero.
+const PersistentMemoryAllocator::Reference
+    PersistentMemoryAllocator::kReferenceQueue =
+        offsetof(SharedMetadata, queue);
+
+const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
+    FILE_PATH_LITERAL(".pma");
+
+
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
+
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator,
+    Reference starting_after)
+    : allocator_(allocator), last_record_(starting_after), record_count_(0) {
+  // Ensure that the starting point is a valid, iterable block (meaning it can
+  // be read and has a non-zero "next" pointer).
+  const volatile BlockHeader* block =
+      allocator_->GetBlock(starting_after, 0, 0, false, false);
+  if (!block || block->next.load(std::memory_order_relaxed) == 0) {
+    NOTREACHED();
+    last_record_.store(kReferenceQueue, std::memory_order_release);
+  }
+}
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
+  // Make a copy of the existing count of found-records, acquiring all changes
+  // made to the allocator, notably "freeptr" (see comment in loop for why
+  // the load of that value cannot be moved above here) that occurred during
+  // any previous runs of this method, including those by parallel threads
+  // that interrupted it. It pairs with the Release at the end of this method.
+  //
+  // Otherwise, if the compiler were to arrange the two loads such that
+  // "count" was fetched _after_ "freeptr" then it would be possible for
+  // this thread to be interrupted between them and other threads perform
+  // multiple allocations, make-iterables, and iterations (with the included
+  // increment of |record_count_|) culminating in the check at the bottom
+  // mistakenly determining that a loop exists. Isn't this stuff fun?
+  uint32_t count = record_count_.load(std::memory_order_acquire);
+
+  Reference last = last_record_.load(std::memory_order_acquire);
+  Reference next;
+  while (true) {
+    const volatile BlockHeader* block =
+        allocator_->GetBlock(last, 0, 0, true, false);
+    if (!block)  // Invalid iterator state.
+      return kReferenceNull;
+
+    // The compiler and CPU can freely reorder all memory accesses on which
+    // there are no dependencies. It could, for example, move the load of
+    // "freeptr" to above this point because there are no explicit dependencies
+    // between it and "next". If it did, however, then another block could
+    // be queued after that but before the following load meaning there is
+    // one more queued block than the future "detect loop by having more
+    // blocks that could fit before freeptr" will allow.
+    //
+    // By "acquiring" the "next" value here, it's synchronized to the enqueue
+    // of the node which in turn is synchronized to the allocation (which sets
+    // freeptr). Thus, the scenario above cannot happen.
+    next = block->next.load(std::memory_order_acquire);
+    if (next == kReferenceQueue)  // No next allocation in queue.
+      return kReferenceNull;
+    block = allocator_->GetBlock(next, 0, 0, false, false);
+    if (!block) {  // Memory is corrupt.
+      allocator_->SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Update the "last_record" pointer to be the reference being returned.
+    // If it fails then another thread has already iterated past it so loop
+    // again. Failing will also load the existing value into "last" so there
+    // is no need to do another such load when the while-loop restarts. A
+    // "strong" compare-exchange is used because failing unnecessarily would
+    // mean repeating some fairly costly validations above.
+    if (last_record_.compare_exchange_strong(last, next)) {
+      *type_return = block->type_id.load(std::memory_order_relaxed);
+      break;
+    }
+  }
+
+  // Memory corruption could cause a loop in the list. Such must be detected
+  // so as to not cause an infinite loop in the caller. This is done by simply
+  // making sure it doesn't iterate more times than the absolute maximum
+  // number of allocations that could have been made. Callers are likely
+  // to loop multiple times before it is detected but at least it stops.
+  const uint32_t freeptr = std::min(
+      allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
+      allocator_->mem_size_);
+  const uint32_t max_records =
+      freeptr / (sizeof(BlockHeader) + kAllocAlignment);
+  if (count > max_records) {
+    allocator_->SetCorrupt();
+    return kReferenceNull;
+  }
+
+  // Increment the count and release the changes made above. It pairs with
+  // the Acquire at the top of this method. Note that this operation is not
+  // strictly synchronized with fetching of the object to return, which would
+  // have to be done inside the loop and is somewhat complicated to achieve.
+  // It does not matter if it falls behind temporarily so long as it never
+  // gets ahead.
+  record_count_.fetch_add(1, std::memory_order_release);
+  return next;
+}
+
+// Walks the iterable list, skipping entries whose type does not equal
+// |type_match|. Returns the first matching reference, or kReferenceNull once
+// the list is exhausted.
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
+  uint32_t found_type;
+  for (Reference found = GetNext(&found_type); found != 0;
+       found = GetNext(&found_type)) {
+    if (found_type == type_match)
+      return found;
+  }
+  return kReferenceNull;
+}
+
+
+// static
+// Validates that a memory segment is usable by this allocator: non-null,
+// properly aligned, within min/max size bounds and (when writable) a whole
+// multiple of the alignment and page size.
+bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
+                                                   size_t size,
+                                                   size_t page_size,
+                                                   bool readonly) {
+  // The segment must exist and be aligned for the atomic operations used on
+  // the embedded metadata.
+  if (!base || reinterpret_cast<uintptr_t>(base) % kAllocAlignment != 0)
+    return false;
+  // It must hold at least the shared metadata but not exceed the maximum
+  // addressable segment size.
+  if (size < sizeof(SharedMetadata) || size > kSegmentMaxSize)
+    return false;
+  // Writable segments must be exact multiples of the alignment and, if a
+  // page size is given, of that page size. Read-only attachment is lenient.
+  if (!readonly && size % kAllocAlignment != 0)
+    return false;
+  if (!readonly && page_size != 0 && size % page_size != 0)
+    return false;
+  return true;
+}
+
+// Constructs an allocator on top of |base|/|size|. The segment may be brand
+// new (all zeros, initialized here) or one previously initialized by another
+// instance, possibly in another process; the "cookie" field distinguishes
+// the two cases. |page_size| of zero means "one page spanning the segment".
+PersistentMemoryAllocator::PersistentMemoryAllocator(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    base::StringPiece name,
+    bool readonly)
+    : mem_base_(static_cast<char*>(base)),
+      mem_size_(static_cast<uint32_t>(size)),
+      mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
+      readonly_(readonly),
+      corrupt_(0),
+      allocs_histogram_(nullptr),
+      used_histogram_(nullptr) {
+  static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
+                "BlockHeader is not a multiple of kAllocAlignment");
+  static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
+                "SharedMetadata is not a multiple of kAllocAlignment");
+  static_assert(kReferenceQueue % kAllocAlignment == 0,
+                "\"queue\" is not aligned properly; must be at end of struct");
+
+  // Ensure that memory segment is of acceptable size.
+  CHECK(IsMemoryAcceptable(base, size, page_size, readonly));
+
+  // These atomics operate inter-process and so must be lock-free. Check the
+  // actual atomic objects inside the just-validated segment; invoking the
+  // member function through a casted null pointer (as previously done) is
+  // undefined behavior.
+  CHECK(shared_meta()->freeptr.is_lock_free());
+  CHECK(shared_meta()->flags.is_lock_free());
+  CHECK(shared_meta()->queue.next.is_lock_free());  // queue is a BlockHeader.
+  CHECK(corrupt_.is_lock_free());
+
+  if (shared_meta()->cookie != kGlobalCookie) {
+    if (readonly) {
+      SetCorrupt();
+      return;
+    }
+
+    // This block is only executed when a completely new memory segment is
+    // being initialized. It's unshared and single-threaded...
+    volatile BlockHeader* const first_block =
+        reinterpret_cast<volatile BlockHeader*>(mem_base_ +
+                                                sizeof(SharedMetadata));
+    if (shared_meta()->cookie != 0 ||
+        shared_meta()->size != 0 ||
+        shared_meta()->version != 0 ||
+        shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
+        shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
+        shared_meta()->id != 0 ||
+        shared_meta()->name != 0 ||
+        shared_meta()->tailptr != 0 ||
+        shared_meta()->queue.cookie != 0 ||
+        shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
+        first_block->size != 0 ||
+        first_block->cookie != 0 ||
+        first_block->type_id.load(std::memory_order_relaxed) != 0 ||
+        first_block->next != 0) {
+      // ...or something malicious has been playing with the metadata.
+      SetCorrupt();
+    }
+
+    // This is still safe to do even if corruption has been detected.
+    shared_meta()->cookie = kGlobalCookie;
+    shared_meta()->size = mem_size_;
+    shared_meta()->page_size = mem_page_;
+    shared_meta()->version = kGlobalVersion;
+    shared_meta()->id = id;
+    shared_meta()->freeptr.store(sizeof(SharedMetadata),
+                                 std::memory_order_release);
+
+    // Set up the queue of iterable allocations.
+    shared_meta()->queue.size = sizeof(BlockHeader);
+    shared_meta()->queue.cookie = kBlockCookieQueue;
+    shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
+    shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
+
+    // Allocate space for the name so other processes can learn it.
+    if (!name.empty()) {
+      const size_t name_length = name.length() + 1;
+      shared_meta()->name = Allocate(name_length, 0);
+      char* name_cstr = GetAsObject<char>(shared_meta()->name, 0);
+      if (name_cstr)
+        memcpy(name_cstr, name.data(), name.length());
+    }
+  } else {
+    if (shared_meta()->size == 0 ||
+        shared_meta()->version == 0 ||
+        shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
+        shared_meta()->tailptr == 0 ||
+        shared_meta()->queue.cookie == 0 ||
+        shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
+      SetCorrupt();
+    }
+    if (!readonly) {
+      // The allocator is attaching to a previously initialized segment of
+      // memory. If the initialization parameters differ, make the best of it
+      // by reducing the local construction parameters to match those of
+      // the actual memory area. This ensures that the local object never
+      // tries to write outside of the original bounds.
+      // Because the fields are const to ensure that no code other than the
+      // constructor makes changes to them as well as to give optimization
+      // hints to the compiler, it's necessary to const-cast them for changes
+      // here.
+      if (shared_meta()->size < mem_size_)
+        *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size;
+      if (shared_meta()->page_size < mem_page_)
+        *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size;
+
+      // Ensure that settings are still valid after the above adjustments.
+      if (!IsMemoryAcceptable(base, mem_size_, mem_page_, readonly))
+        SetCorrupt();
+    }
+  }
+}
+
+PersistentMemoryAllocator::~PersistentMemoryAllocator() {
+  // It's strictly forbidden to do any memory access here in case there is
+  // some issue with the underlying memory segment. The "Local" allocator
+  // makes use of this to allow deletion of the segment on the heap from
+  // within its destructor. Derived classes own and release the backing
+  // memory themselves.
+}
+
+// Returns the identifier stored in the shared metadata when the segment was
+// first initialized (possibly by another process).
+uint64_t PersistentMemoryAllocator::Id() const {
+  return shared_meta()->id;
+}
+
+// Returns the name stored inside the segment at initialization time, or an
+// empty string if no name was stored or the stored value is unreadable.
+const char* PersistentMemoryAllocator::Name() const {
+  Reference name_ref = shared_meta()->name;
+  const char* name_cstr = GetAsObject<char>(name_ref, 0);
+  if (!name_cstr)
+    return "";
+
+  size_t name_length = GetAllocSize(name_ref);
+  // GetAllocSize() returns zero when the block fails re-validation; without
+  // this check, |name_length - 1| would wrap around (size_t underflow) and
+  // index far outside the allocation. Either condition means corruption.
+  if (name_length == 0 || name_cstr[name_length - 1] != '\0') {
+    NOTREACHED();
+    SetCorrupt();
+    return "";
+  }
+
+  return name_cstr;
+}
+
+// Creates the UMA histograms used to track usage of this allocator. A no-op
+// for read-only attachments or when |name| is empty; must be called at most
+// once (enforced by the DCHECKs below).
+void PersistentMemoryAllocator::CreateTrackingHistograms(
+    base::StringPiece name) {
+  if (name.empty() || readonly_)
+    return;
+
+  std::string name_string = name.as_string();
+  DCHECK(!used_histogram_);
+  used_histogram_ = LinearHistogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
+      HistogramBase::kUmaTargetedHistogramFlag);
+
+  DCHECK(!allocs_histogram_);
+  allocs_histogram_ = Histogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
+      HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+// Returns the number of bytes consumed so far, clamped to the segment size
+// so a corrupt (inflated) shared freeptr cannot report more than exists.
+size_t PersistentMemoryAllocator::used() const {
+  return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
+                  mem_size_);
+}
+
+// Returns the usable (post-header) size of the allocation at |ref|, or zero
+// if the reference is invalid or the stored size fails re-validation.
+size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
+  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return 0;
+  uint32_t size = block->size;
+  // Header was verified by GetBlock() but a malicious actor could change
+  // the value between there and here. Check it again.
+  if (size <= sizeof(BlockHeader) || ref + size > mem_size_) {
+    SetCorrupt();
+    return 0;
+  }
+  return size - sizeof(BlockHeader);
+}
+
+// Returns the type-id stored in the block at |ref|, or zero when the
+// reference does not resolve to a valid allocated block.
+uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
+  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  return block ? block->type_id.load(std::memory_order_relaxed) : 0;
+}
+
+// Atomically changes the type of the block at |ref| from |from_type_id| to
+// |to_type_id|, returning false if the current type is not |from_type_id|
+// or the reference is invalid.
+bool PersistentMemoryAllocator::ChangeType(Reference ref,
+                                           uint32_t to_type_id,
+                                           uint32_t from_type_id) {
+  DCHECK(!readonly_);
+  volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return false;
+
+  // This is a "strong" exchange because there is no loop that can retry in
+  // the wake of spurious failures possible with "weak" exchanges.
+  return block->type_id.compare_exchange_strong(from_type_id, to_type_id);
+}
+
+// Allocates |req_size| usable bytes tagged with |type_id|, recording the
+// outcome in the (optional) allocation-tracking histogram. Returns
+// kReferenceNull on failure.
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
+    size_t req_size,
+    uint32_t type_id) {
+  const Reference ref = AllocateImpl(req_size, type_id);
+  if (allocs_histogram_) {
+    // A successful allocation records its requested size; a failure records
+    // zero so failed attempts remain visible in the distribution.
+    allocs_histogram_->Add(
+        ref ? static_cast<HistogramBase::Sample>(req_size) : 0);
+  }
+  return ref;
+}
+
+// Lock-free allocation: computes a candidate block at the current freeptr,
+// then publishes it with a compare-exchange of freeptr, retrying from
+// scratch if another thread/process allocated in the meantime. Returns
+// kReferenceNull on failure (bad size, full segment, or corruption).
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
+    size_t req_size,
+    uint32_t type_id) {
+  DCHECK(!readonly_);
+
+  // Validate req_size to ensure it won't overflow when used as 32-bit value.
+  if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
+    NOTREACHED();
+    return kReferenceNull;
+  }
+
+  // Round up the requested size, plus header, to the next allocation alignment.
+  uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader));
+  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
+  if (size <= sizeof(BlockHeader) || size > mem_page_) {
+    NOTREACHED();
+    return kReferenceNull;
+  }
+
+  // Get the current start of unallocated memory. Other threads may
+  // update this at any time and cause us to retry these operations.
+  // This value should be treated as "const" to avoid confusion through
+  // the code below but recognize that any failed compare-exchange operation
+  // involving it will cause it to be loaded with a more recent value. The
+  // code should either exit or restart the loop in that case.
+  /* const */ uint32_t freeptr =
+      shared_meta()->freeptr.load(std::memory_order_acquire);
+
+  // Allocation is lockless so we do all our calculation and then, if saving
+  // indicates a change has occurred since we started, scrap everything and
+  // start over.
+  for (;;) {
+    if (IsCorrupt())
+      return kReferenceNull;
+
+    if (freeptr + size > mem_size_) {
+      SetFlag(&shared_meta()->flags, kFlagFull);
+      return kReferenceNull;
+    }
+
+    // Get pointer to the "free" block. If something has been allocated since
+    // the load of freeptr above, it is still safe as nothing will be written
+    // to that location until after the compare-exchange below.
+    volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
+    if (!block) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // An allocation cannot cross page boundaries. If it would, create a
+    // "wasted" block and begin again at the top of the next page. This
+    // area could just be left empty but we fill in the block header just
+    // for completeness sake.
+    const uint32_t page_free = mem_page_ - freeptr % mem_page_;
+    if (size > page_free) {
+      if (page_free <= sizeof(BlockHeader)) {
+        SetCorrupt();
+        return kReferenceNull;
+      }
+      const uint32_t new_freeptr = freeptr + page_free;
+      if (shared_meta()->freeptr.compare_exchange_strong(freeptr,
+                                                         new_freeptr)) {
+        block->size = page_free;
+        block->cookie = kBlockCookieWasted;
+      }
+      continue;
+    }
+
+    // Don't leave a slice at the end of a page too small for anything. This
+    // can result in an allocation up to two alignment-sizes greater than the
+    // minimum required by requested-size + header + alignment.
+    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment)
+      size = page_free;
+
+    const uint32_t new_freeptr = freeptr + size;
+    if (new_freeptr > mem_size_) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Save our work. Try again if another thread has completed an allocation
+    // while we were processing. A "weak" exchange would be permissible here
+    // because the code will just loop and try again but the above processing
+    // is significant so make the extra effort of a "strong" exchange.
+    if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr))
+      continue;
+
+    // Given that all memory was zeroed before ever being given to an instance
+    // of this class and given that we only allocate in a monotonic fashion
+    // going forward, it must be that the newly allocated block is completely
+    // full of zeros. If we find anything in the block header that is NOT a
+    // zero then something must have previously run amok through memory,
+    // writing beyond the allocated space and into unallocated space.
+    if (block->size != 0 ||
+        block->cookie != kBlockCookieFree ||
+        block->type_id.load(std::memory_order_relaxed) != 0 ||
+        block->next.load(std::memory_order_relaxed) != 0) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    block->size = size;
+    block->cookie = kBlockCookieAllocated;
+    block->type_id.store(type_id, std::memory_order_relaxed);
+    return freeptr;
+  }
+}
+
+// Fills |meminfo| with the total segment size and an estimate of the
+// remaining free space (reported as zero once corruption is detected).
+void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
+  // Clamp the shared freeptr to the segment size before subtracting: a
+  // corrupt or malicious freeptr greater than mem_size_ would otherwise make
+  // the unsigned subtraction wrap to a huge "remaining" value. This matches
+  // the clamping done by used().
+  const uint32_t freeptr = std::min(
+      shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
+  const uint32_t remaining =
+      std::max(mem_size_ - freeptr, (uint32_t)sizeof(BlockHeader));
+  meminfo->total = mem_size_;
+  meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
+}
+
+// Adds the block at |ref| to the tail of the lock-free singly-linked queue
+// of iterable allocations. Safe against concurrent callers in other threads
+// and processes; a no-op for invalid refs, corruption, or blocks that are
+// already iterable.
+void PersistentMemoryAllocator::MakeIterable(Reference ref) {
+  DCHECK(!readonly_);
+  if (IsCorrupt())
+    return;
+  volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
+  if (!block)  // invalid reference
+    return;
+  if (block->next.load(std::memory_order_acquire) != 0)  // Already iterable.
+    return;
+  block->next.store(kReferenceQueue, std::memory_order_release);  // New tail.
+
+  // Try to add this block to the tail of the queue. May take multiple tries.
+  // If so, tail will be automatically updated with a more recent value during
+  // compare-exchange operations.
+  uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
+  for (;;) {
+    // Acquire the current tail-pointer released by previous call to this
+    // method and validate it.
+    block = GetBlock(tail, 0, 0, true, false);
+    if (!block) {
+      SetCorrupt();
+      return;
+    }
+
+    // Try to insert the block at the tail of the queue. The tail node always
+    // has an existing value of kReferenceQueue; if that is somehow not the
+    // existing value then another thread has acted in the meantime. A "strong"
+    // exchange is necessary so the "else" block does not get executed when
+    // that is not actually the case (which can happen with a "weak" exchange).
+    uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
+    if (block->next.compare_exchange_strong(next, ref,
+                                            std::memory_order_acq_rel,
+                                            std::memory_order_acquire)) {
+      // Update the tail pointer to the new offset. If the "else" clause did
+      // not exist, then this could be a simple Release_Store to set the new
+      // value but because it does, it's possible that other threads could add
+      // one or more nodes at the tail before reaching this point. We don't
+      // have to check the return value because it either operates correctly
+      // or the exact same operation has already been done (by the "else"
+      // clause) on some other thread.
+      shared_meta()->tailptr.compare_exchange_strong(tail, ref,
+                                                     std::memory_order_release,
+                                                     std::memory_order_relaxed);
+      return;
+    } else {
+      // In the unlikely case that a thread crashed or was killed between the
+      // update of "next" and the update of "tailptr", it is necessary to
+      // perform the operation that would have been done. There's no explicit
+      // check for crash/kill which means that this operation may also happen
+      // even when the other thread is in perfect working order which is what
+      // necessitates the CompareAndSwap above.
+      shared_meta()->tailptr.compare_exchange_strong(tail, next,
+                                                     std::memory_order_acq_rel,
+                                                     std::memory_order_acquire);
+    }
+  }
+}
+
+// The "corrupted" state is held both locally and globally (shared). The
+// shared flag can't be trusted since a malicious actor could overwrite it.
+// Because corruption can be detected during read-only operations such as
+// iteration, this method may be called by other "const" methods. In this
+// case, it's safe to discard the constness and modify the local flag and
+// maybe even the shared flag if the underlying data isn't actually read-only.
+void PersistentMemoryAllocator::SetCorrupt() const {
+  LOG(ERROR) << "Corruption detected in shared-memory segment.";
+  // Relaxed ordering suffices: the flag is a one-way latch and readers only
+  // need to eventually observe it.
+  const_cast<std::atomic<bool>*>(&corrupt_)->store(true,
+                                                   std::memory_order_relaxed);
+  if (!readonly_) {
+    SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
+            kFlagCorrupt);
+  }
+}
+
+// Reports whether corruption has been detected either locally or, via the
+// shared flag, by any other attached process.
+bool PersistentMemoryAllocator::IsCorrupt() const {
+  if (corrupt_.load(std::memory_order_relaxed) ||
+      CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
+    SetCorrupt();  // Make sure all indicators are set.
+    return true;
+  }
+  return false;
+}
+
+// Reports whether a previous allocation attempt found the segment full.
+bool PersistentMemoryAllocator::IsFull() const {
+  return CheckFlag(&shared_meta()->flags, kFlagFull);
+}
+
+// Dereference a block |ref| and ensure that it's valid for the desired
+// |type_id| and |size|. |special| indicates that we may try to access block
+// headers not available to callers but still accessed by this module. By
+// having internal dereferences go through this same function, the allocator
+// is hardened against corruption. |free_ok| skips the header validation so
+// the not-yet-initialized block at the freeptr can be examined.
+const volatile PersistentMemoryAllocator::BlockHeader*
+PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
+                                    uint32_t size, bool queue_ok,
+                                    bool free_ok) const {
+  // Validation of parameters.
+  if (ref % kAllocAlignment != 0)
+    return nullptr;
+  if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
+    return nullptr;
+  size += sizeof(BlockHeader);
+  if (ref + size > mem_size_)
+    return nullptr;
+
+  // Validation of referenced block-header.
+  if (!free_ok) {
+    // Clamp freeptr so a corrupt shared value can't widen the valid range.
+    uint32_t freeptr = std::min(
+        shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
+    if (ref + size > freeptr)
+      return nullptr;
+    const volatile BlockHeader* const block =
+        reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
+    if (block->size < size)
+      return nullptr;
+    if (ref + block->size > freeptr)
+      return nullptr;
+    if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
+      return nullptr;
+    if (type_id != 0 &&
+        block->type_id.load(std::memory_order_relaxed) != type_id) {
+      return nullptr;
+    }
+  }
+
+  // Return pointer to block data.
+  return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
+}
+
+// Like GetBlock() but returns a pointer to the caller-usable data area just
+// past the block header, or nullptr when validation fails. |size| must be
+// non-zero so that some usable data is actually being requested.
+const volatile void* PersistentMemoryAllocator::GetBlockData(
+    Reference ref,
+    uint32_t type_id,
+    uint32_t size) const {
+  DCHECK(size > 0);
+  const volatile BlockHeader* const block =
+      GetBlock(ref, type_id, size, false, false);
+  return block
+             ? reinterpret_cast<const volatile char*>(block) +
+                   sizeof(BlockHeader)
+             : nullptr;
+}
+
+// Records the current percentage of the segment in use into the tracking
+// histogram created by CreateTrackingHistograms(); a no-op otherwise.
+void PersistentMemoryAllocator::UpdateTrackingHistograms() {
+  DCHECK(!readonly_);
+  if (used_histogram_) {
+    MemoryInfo meminfo;
+    GetMemoryInfo(&meminfo);
+    HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
+        ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
+    used_histogram_->Add(used_percent);
+  }
+}
+
+
+//----- LocalPersistentMemoryAllocator -----------------------------------------
+
+// Creates a writable allocator backed by freshly mapped local memory of the
+// requested size (released again by the destructor).
+LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
+    size_t size,
+    uint64_t id,
+    base::StringPiece name)
+    : PersistentMemoryAllocator(AllocateLocalMemory(size),
+                                size, 0, id, name, false) {}
+
+LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
+  // Release the memory mapped in AllocateLocalMemory(). This is safe here
+  // because the base-class destructor does no memory access.
+  DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_);
+}
+
+// static
+// Maps |size| bytes of zero-initialized memory via the platform's virtual
+// memory API (zero-fill is guaranteed by VirtualAlloc/anonymous mmap, which
+// the allocator relies on for new segments).
+void* LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
+#if defined(OS_WIN)
+  void* address =
+      ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+  DPCHECK(address);
+  return address;
+#elif defined(OS_POSIX)
+  // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
+  // MAP_SHARED is not available on Linux <2.4 but required on Mac.
+  void* address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
+                         MAP_ANON | MAP_SHARED, -1, 0);
+  DPCHECK(MAP_FAILED != address);
+  return address;
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+// static
+// Releases memory previously obtained from AllocateLocalMemory().
+void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory,
+                                                           size_t size) {
+#if defined(OS_WIN)
+  BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
+  DPCHECK(success);
+#elif defined(OS_POSIX)
+  int result = ::munmap(memory, size);
+  DPCHECK(0 == result);
+#else
+#error This architecture is not (yet) supported.
+#endif
+}
+
+
+//----- SharedPersistentMemoryAllocator ----------------------------------------
+
+// Wraps an already-mapped SharedMemory segment; ownership of |memory| is
+// taken so the mapping outlives the base-class allocator that uses it.
+SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
+    std::unique_ptr<SharedMemory> memory,
+    uint64_t id,
+    base::StringPiece name,
+    bool read_only)
+    : PersistentMemoryAllocator(static_cast<uint8_t*>(memory->memory()),
+                                memory->mapped_size(),
+                                0,
+                                id,
+                                name,
+                                read_only),
+      shared_memory_(std::move(memory)) {}
+
+SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
+
+// static
+// Checks, before construction, that a mapped SharedMemory segment meets the
+// allocator's alignment and size requirements for read/write use.
+bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
+    const SharedMemory& memory) {
+  return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false);
+}
+
+
+#if !defined(OS_NACL)
+//----- FilePersistentMemoryAllocator ------------------------------------------
+
+// Wraps a memory-mapped file; ownership of |file| is taken so the mapping
+// outlives the base-class allocator. A |max_size| of zero means "use the
+// whole mapped length".
+FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
+    std::unique_ptr<MemoryMappedFile> file,
+    size_t max_size,
+    uint64_t id,
+    base::StringPiece name,
+    bool read_only)
+    : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
+                                max_size != 0 ? max_size : file->length(),
+                                0,
+                                id,
+                                name,
+                                read_only),
+      mapped_file_(std::move(file)) {}
+
+FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
+
+// static
+// Checks, before construction, that a memory-mapped file meets the
+// allocator's alignment and size requirements for the requested access mode.
+bool FilePersistentMemoryAllocator::IsFileAcceptable(
+    const MemoryMappedFile& file,
+    bool read_only) {
+  return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
+}
+#endif  // !defined(OS_NACL)
+
+}  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
new file mode 100644
index 0000000..2fc0d2d
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator.h
@@ -0,0 +1,429 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
+#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <atomic>
+#include <memory>
+#include <type_traits>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/files/file_path.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class HistogramBase;
+class MemoryMappedFile;
+class SharedMemory;
+
+// Simple allocator for pieces of a memory block that may be persistent
+// to some storage or shared across multiple processes. This class resides
+// under base/metrics because it was written for that purpose. It is,
+// however, fully general-purpose and can be freely moved to base/memory
+// if other uses are found.
+//
+// This class provides for thread-secure (i.e. safe against other threads
+// or processes that may be compromised and thus have malicious intent)
+// allocation of memory within a designated block and also a mechanism by
+// which other threads can learn of these allocations.
+//
+// There is (currently) no way to release an allocated block of data because
+// doing so would risk invalidating pointers held by other processes and
+// greatly complicate the allocation algorithm.
+//
+// Construction of this object can accept new, clean (i.e. zeroed) memory
+// or previously initialized memory. In the first case, construction must
+// be allowed to complete before letting other allocators attach to the same
+// segment. In other words, don't share the segment until at least one
+// allocator has been attached to it.
+//
+// Note that memory not in active use is not accessed so it is possible to
+// use virtual memory, including memory-mapped files, as backing storage with
+// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
+class BASE_EXPORT PersistentMemoryAllocator {
+ public:
+  typedef uint32_t Reference;
+
+  // Iterator for going through all iterable memory records in an allocator.
+  // Like the allocator itself, iterators are lock-free and thread-secure.
+  // That means that multiple threads can share an iterator and the same
+  // reference will not be returned twice.
+  //
+  // Iteration, in general, is tolerant of corrupted memory. It will return
+  // what it can and stop only when corruption forces it to. Bad corruption
+  // could cause the same object to be returned many times but it will
+  // eventually quit.
+  class BASE_EXPORT Iterator {
+   public:
+    // Constructs an iterator on a given |allocator|, starting at the beginning.
+    // The allocator must live beyond the lifetime of the iterator. This class
+    // has read-only access to the allocator (hence "const") but the returned
+    // references can be used on a read/write version, too.
+    explicit Iterator(const PersistentMemoryAllocator* allocator);
+
+    // As above but resuming from the |starting_after| reference. The first call
+    // to GetNext() will return the next object found after that reference. The
+    // reference must be to an "iterable" object; references to non-iterable
+    // objects (those that never had MakeIterable() called for them) will cause
+    // a run-time error.
+    Iterator(const PersistentMemoryAllocator* allocator,
+             Reference starting_after);
+
+    // Gets the next iterable, storing that type in |type_return|. The actual
+    // return value is a reference to the allocation inside the allocator or
+    // zero if there are no more. GetNext() may still be called again at a
+    // later time to retrieve any new allocations that have been added.
+    Reference GetNext(uint32_t* type_return);
+
+    // Similar to above but gets the next iterable of a specific |type_match|.
+    // This should not be mixed with calls to GetNext() because any allocations
+    // skipped here due to a type mis-match will never be returned by later
+    // calls to GetNext() meaning it's possible to completely miss entries.
+    Reference GetNextOfType(uint32_t type_match);
+
+    // Converts references to objects. This is a convenience method so that
+    // users of the iterator don't need to also have their own pointer to the
+    // allocator over which the iterator runs in order to retrieve objects.
+    // Because the iterator is not read/write, only "const" objects can be
+    // fetched. Non-const objects can be fetched using the reference on a
+    // non-const (external) pointer to the same allocator (or use const_cast
+    // to remove the qualifier).
+    template <typename T>
+    const T* GetAsObject(Reference ref, uint32_t type_id) const {
+      return allocator_->GetAsObject<T>(ref, type_id);
+    }
+
+   private:
+    // Weak-pointer to memory allocator being iterated over.
+    const PersistentMemoryAllocator* allocator_;
+
+    // The last record that was returned.
+    std::atomic<Reference> last_record_;
+
+    // The number of records found; used for detecting loops.
+    std::atomic<uint32_t> record_count_;
+
+    DISALLOW_COPY_AND_ASSIGN(Iterator);
+  };
+
+  // Returned information about the internal state of the heap.
+  struct MemoryInfo {
+    size_t total;
+    size_t free;
+  };
+
+  enum : Reference {
+    kReferenceNull = 0  // A common "null" reference value.
+  };
+
+  enum : uint32_t {
+    kTypeIdAny = 0  // Match any type-id inside GetAsObject().
+  };
+
+  // This is the standard file extension (suitable for being passed to the
+  // AddExtension() method of base::FilePath) for dumps of persistent memory.
+  static const base::FilePath::CharType kFileExtension[];
+
+  // The allocator operates on any arbitrary block of memory. Creation and
+  // persisting or sharing of that block with another process is the
+  // responsibility of the caller. The allocator needs to know only the
+  // block's |base| address, the total |size| of the block, and any internal
+  // |page| size (zero if not paged) across which allocations should not span.
+  // The |id| is an arbitrary value the caller can use to identify a
+  // particular memory segment. It will only be loaded during the initial
+  // creation of the segment and can be checked by the caller for consistency.
+  // The |name|, if provided, is used to distinguish histograms for this
+  // allocator. Only the primary owner of the segment should define this value;
+  // other processes can learn it from the shared state. If the underlying
+  // memory is |readonly| then no changes will be made to it. The resulting
+  // object should be stored as a "const" pointer.
+  //
+  // PersistentMemoryAllocator does NOT take ownership of the memory block.
+  // The caller must manage it and ensure it stays available throughout the
+  // lifetime of this object.
+  //
+  // Memory segments for sharing must have had an allocator attached to them
+  // before actually being shared. If the memory segment was just created, it
+  // should be zeroed before being passed here. If it was an existing segment,
+  // the values here will be compared to copies stored in the shared segment
+  // as a guard against corruption.
+  //
+  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
+  // method below) before construction if the definition of the segment can
+  // vary in any way at run-time. Invalid memory segments will cause a crash.
+  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
+                            uint64_t id, base::StringPiece name,
+                            bool readonly);
+  virtual ~PersistentMemoryAllocator();
+
+  // Check if memory segment is acceptable for creation of an Allocator. This
+  // doesn't do any analysis of the data and so doesn't guarantee that the
+  // contents are valid, just that the parameters won't cause the program to
+  // abort. The IsCorrupt() method will report detection of data problems
+  // found during construction and general operation.
+  static bool IsMemoryAcceptable(const void* data, size_t size,
+                                 size_t page_size, bool readonly);
+
+  // Get the internal identifier for this persistent memory segment.
+  uint64_t Id() const;
+
+  // Get the internal name of this allocator (possibly an empty string).
+  const char* Name() const;
+
+  // Is this segment open only for read? Const so it remains callable through
+  // the "const" pointer that read-only segments are documented to be held by.
+  bool IsReadonly() const { return readonly_; }
+
+  // Create internal histograms for tracking memory use and allocation sizes
+  // for allocator of |name| (which can simply be the result of Name()). This
+  // is done separately from construction for situations such as when the
+  // histograms will be backed by memory provided by this very allocator.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histograms:
+  //    UMA.PersistentAllocator.name.Allocs
+  //    UMA.PersistentAllocator.name.UsedPct
+  void CreateTrackingHistograms(base::StringPiece name);
+
+  // Direct access to underlying memory segment. If the segment is shared
+  // across threads or processes, reading data through these values does
+  // not guarantee consistency. Use with care. Do not write.
+  const void* data() const { return const_cast<const char*>(mem_base_); }
+  size_t length() const { return mem_size_; }
+  size_t size() const { return mem_size_; }
+  size_t used() const;
+
+  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
+  // code and size-of(|T|) are compared to ensure the reference is valid
+  // and cannot return an object outside of the memory segment. A |type_id| of
+  // kTypeIdAny (zero) will match any though the size is still checked. NULL is
+  // returned if any problem is detected, such as corrupted storage or incorrect
+  // parameters. Callers MUST check that the returned value is not-null EVERY
+  // TIME before accessing it or risk crashing! Once dereferenced, the pointer
+  // is safe to reuse forever.
+  //
+  // NOTE: Though this method will guarantee that an object of the specified
+  // type can be accessed without going outside the bounds of the memory
+  // segment, it makes no guarantees of the validity of the data within the
+  // object itself. If it is expected that the contents of the segment could
+  // be compromised with malicious intent, the object must be hardened as well.
+  //
+  // Though the persistent data may be "volatile" if it is shared with
+  // other processes, such is not necessarily the case. The internal
+  // "volatile" designation is discarded so as to not propagate the viral
+  // nature of that keyword to the caller. It can add it back, if necessary,
+  // based on knowledge of how the allocator is being used.
+  template <typename T>
+  T* GetAsObject(Reference ref, uint32_t type_id) {
+    static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
+    return const_cast<T*>(
+        reinterpret_cast<volatile T*>(GetBlockData(ref, type_id, sizeof(T))));
+  }
+  template <typename T>
+  const T* GetAsObject(Reference ref, uint32_t type_id) const {
+    static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
+    return const_cast<const T*>(
+        reinterpret_cast<const volatile T*>(GetBlockData(
+            ref, type_id, sizeof(T))));
+  }
+
+  // Get the number of bytes allocated to a block. This is useful when storing
+  // arrays in order to validate the ending boundary. The returned value will
+  // include any padding added to achieve the required alignment and so could
+  // be larger than given in the original Allocate() request.
+  size_t GetAllocSize(Reference ref) const;
+
+  // Access the internal "type" of an object. This generally isn't necessary
+  // but can be used to "clear" the type and so effectively mark it as deleted
+  // even though the memory stays valid and allocated. Changing the type is
+  // an atomic compare/exchange and so requires knowing the existing value.
+  // It will return false if the existing type is not what is expected.
+  uint32_t GetType(Reference ref) const;
+  bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);
+
+  // Reserve space in the memory segment of the desired |size| and |type_id|.
+  // A return value of zero indicates the allocation failed, otherwise the
+  // returned reference can be used by any process to get a real pointer via
+  // the GetAsObject() call.
+  Reference Allocate(size_t size, uint32_t type_id);
+
+  // Allocated objects can be added to an internal list that can then be
+  // iterated over by other processes. If an allocated object can be found
+  // another way, such as by having its reference within a different object
+  // that will be made iterable, then this call is not necessary. This always
+  // succeeds unless corruption is detected; check IsCorrupted() to find out.
+  // Once an object is made iterable, its position in iteration can never
+  // change; new iterable objects will always be added after it in the series.
+  void MakeIterable(Reference ref);
+
+  // Get the information about the amount of free space in the allocator. The
+  // amount of free space should be treated as approximate due to extras from
+  // alignment and metadata. Concurrent allocations from other threads will
+  // also make the true amount less than what is reported.
+  void GetMemoryInfo(MemoryInfo* meminfo) const;
+
+  // If there is some indication that the memory has become corrupted,
+  // calling this will attempt to prevent further damage by indicating to
+  // all processes that something is not as expected.
+  void SetCorrupt() const;
+
+  // This can be called to determine if corruption has been detected in the
+  // segment, possibly by a malicious actor. Once detected, future allocations
+  // will fail and iteration may not locate all objects.
+  bool IsCorrupt() const;
+
+  // Flag set if an allocation has failed because the memory segment was full.
+  bool IsFull() const;
+
+  // Update those "tracking" histograms which do not get updates during regular
+  // operation, such as how much memory is currently used. This should be
+  // called before such information is to be displayed or uploaded.
+  void UpdateTrackingHistograms();
+
+ protected:
+  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
+  const uint32_t mem_size_;        // Size of entire memory segment.
+  const uint32_t mem_page_;        // Page size allocations shouldn't cross.
+
+ private:
+  struct SharedMetadata;
+  struct BlockHeader;
+  static const uint32_t kAllocAlignment;
+  static const Reference kReferenceQueue;
+
+  // The shared metadata is always located at the top of the memory segment.
+  // These convenience functions eliminate constant casting of the base
+  // pointer within the code.
+  const SharedMetadata* shared_meta() const {
+    return reinterpret_cast<const SharedMetadata*>(
+        const_cast<const char*>(mem_base_));
+  }
+  SharedMetadata* shared_meta() {
+    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
+  }
+
+  // Actual method for doing the allocation.
+  Reference AllocateImpl(size_t size, uint32_t type_id);
+
+  // Get the block header associated with a specific reference.
+  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
+                                       uint32_t size, bool queue_ok,
+                                       bool free_ok) const;
+  volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
+                                 bool queue_ok, bool free_ok) {
+      return const_cast<volatile BlockHeader*>(
+          const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
+              ref, type_id, size, queue_ok, free_ok));
+  }
+
+  // Get the actual data within a block associated with a specific reference.
+  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
+                                    uint32_t size) const;
+  volatile void* GetBlockData(Reference ref, uint32_t type_id,
+                              uint32_t size) {
+      return const_cast<volatile void*>(
+          const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
+              ref, type_id, size));
+  }
+
+  const bool readonly_;              // Indicates access to read-only memory.
+  std::atomic<bool> corrupt_;        // Local version of "corrupted" flag.
+
+  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
+  HistogramBase* used_histogram_;    // Histogram recording used space.
+
+  friend class PersistentMemoryAllocatorTest;
+  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
+  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
+};
+
+
+// This allocator uses a local memory block it allocates from the general
+// heap. It is generally used when some kind of "death rattle" handler will
+// save the contents to persistent storage during process shutdown. It is
+// also useful for testing.
+class BASE_EXPORT LocalPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
+                                 base::StringPiece name);
+  // Presumably releases the block obtained via AllocateLocalMemory(); the
+  // definitions live in the corresponding .cc file — confirm there.
+  ~LocalPersistentMemoryAllocator() override;
+
+ private:
+  // Allocates a block of local memory of the specified |size|, ensuring that
+  // the memory will not be physically allocated until accessed and will read
+  // as zero when that happens.
+  static void* AllocateLocalMemory(size_t size);
+
+  // Deallocates a block of local |memory| of the specified |size|.
+  static void DeallocateLocalMemory(void* memory, size_t size);
+
+  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
+};
+
+
+// This allocator takes a shared-memory object and performs allocation from
+// it. The memory must be previously mapped via Map() or MapAt(). The allocator
+// takes ownership of the memory object.
+class BASE_EXPORT SharedPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
+                                  uint64_t id,
+                                  base::StringPiece name,
+                                  bool read_only);
+  ~SharedPersistentMemoryAllocator() override;
+
+  // Returns the underlying SharedMemory object; ownership is retained by
+  // this allocator.
+  SharedMemory* shared_memory() { return shared_memory_.get(); }
+
+  // Ensure that the memory isn't so invalid that it won't crash when passing it
+  // to the allocator. This doesn't guarantee the data is valid, just that it
+  // won't cause the program to abort. The existing IsCorrupt() call will handle
+  // the rest.
+  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);
+
+ private:
+  // The owned mapping backing this allocator (see constructor).
+  std::unique_ptr<SharedMemory> shared_memory_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
+};
+
+
+#if !defined(OS_NACL)  // NACL doesn't support any kind of file access in build.
+// This allocator takes a memory-mapped file object and performs allocation
+// from it. The allocator takes ownership of the file object.
+class BASE_EXPORT FilePersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  // A |max_size| of zero will use the length of the file as the maximum
+  // size. The |file| object must have been already created with sufficient
+  // permissions (read, read/write, or read/write/extend).
+  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
+                                size_t max_size,
+                                uint64_t id,
+                                base::StringPiece name,
+                                bool read_only);
+  ~FilePersistentMemoryAllocator() override;
+
+  // Ensure that the file isn't so invalid that it won't crash when passing it
+  // to the allocator. This doesn't guarantee the file is valid, just that it
+  // won't cause the program to abort. The existing IsCorrupt() call will handle
+  // the rest.
+  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
+
+ private:
+  // The owned file mapping backing this allocator (see constructor).
+  std::unique_ptr<MemoryMappedFile> mapped_file_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
+};
+#endif  // !defined(OS_NACL)
+
+}  // namespace base
+
+#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
new file mode 100644
index 0000000..a3d90c2
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -0,0 +1,815 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_memory_allocator.h"
+
+#include <memory>
+
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
+#include "base/strings/safe_sprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+const uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
+const uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
+const uint32_t TEST_ID = 12345;
+const char TEST_NAME[] = "TestAllocator";
+
+}  // namespace
+
+namespace base {
+
+typedef PersistentMemoryAllocator::Reference Reference;
+
+class PersistentMemoryAllocatorTest : public testing::Test {
+ public:
+  // This can't be statically initialized because its value isn't defined
+  // in the PersistentMemoryAllocator header file. Instead, it's simply set
+  // in the constructor.
+  uint32_t kAllocAlignment;
+
+  struct TestObject1 {
+    int onething;
+    char oranother;
+  };
+
+  struct TestObject2 {
+    int thiis;
+    long that;
+    float andthe;
+    char other;
+    double thing;
+  };
+
+  PersistentMemoryAllocatorTest() {
+    kAllocAlignment = GetAllocAlignment();
+    mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
+  }
+
+  void SetUp() override {
+    // Start each test with a zeroed segment and a freshly-attached allocator.
+    allocator_.reset();
+    ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
+    allocator_.reset(new PersistentMemoryAllocator(
+        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
+        TEST_ID, TEST_NAME, false));
+    allocator_->CreateTrackingHistograms(allocator_->Name());
+  }
+
+  void TearDown() override {
+    allocator_.reset();
+  }
+
+  // Walks all iterable records in |allocator_| and returns how many were seen.
+  unsigned CountIterables() {
+    PersistentMemoryAllocator::Iterator iter(allocator_.get());
+    uint32_t type;
+    unsigned count = 0;
+    while (iter.GetNext(&type) != 0) {
+      ++count;
+    }
+    return count;
+  }
+
+  // Reads the private alignment constant; works because this fixture is
+  // declared a friend of PersistentMemoryAllocator.
+  static uint32_t GetAllocAlignment() {
+    return PersistentMemoryAllocator::kAllocAlignment;
+  }
+
+ protected:
+  std::unique_ptr<char[]> mem_segment_;  // Backing storage for |allocator_|.
+  std::unique_ptr<PersistentMemoryAllocator> allocator_;
+};
+
+TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
+  std::string base_name(TEST_NAME);
+  EXPECT_EQ(TEST_ID, allocator_->Id());
+  EXPECT_TRUE(allocator_->used_histogram_);
+  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
+            allocator_->used_histogram_->histogram_name());
+  EXPECT_TRUE(allocator_->allocs_histogram_);
+  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".Allocs",
+            allocator_->allocs_histogram_->histogram_name());
+
+  // Get base memory info for later comparison.
+  PersistentMemoryAllocator::MemoryInfo meminfo0;
+  allocator_->GetMemoryInfo(&meminfo0);
+  EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
+  EXPECT_GT(meminfo0.total, meminfo0.free);
+
+  // Validate allocation of test object and make sure it can be referenced
+  // and all metadata looks correct.
+  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
+  EXPECT_NE(0U, block1);
+  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
+  EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
+  EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
+            allocator_->GetAllocSize(block1));
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  allocator_->GetMemoryInfo(&meminfo1);
+  EXPECT_EQ(meminfo0.total, meminfo1.total);
+  EXPECT_GT(meminfo0.free, meminfo1.free);
+
+  // Ensure that the test-object can be made iterable.
+  PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
+  uint32_t type;
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
+  allocator_->MakeIterable(block1);
+  EXPECT_EQ(block1, iter1a.GetNext(&type));
+  EXPECT_EQ(1U, type);
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
+
+  // Create second test-object and ensure everything is good and it cannot
+  // be confused with test-object of another type.
+  Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
+  EXPECT_NE(0U, block2);
+  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
+  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1));
+  EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
+  EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
+            allocator_->GetAllocSize(block2));
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  allocator_->GetMemoryInfo(&meminfo2);
+  EXPECT_EQ(meminfo1.total, meminfo2.total);
+  EXPECT_GT(meminfo1.free, meminfo2.free);
+
+  // Ensure that second test-object can also be made iterable.
+  allocator_->MakeIterable(block2);
+  EXPECT_EQ(block2, iter1a.GetNext(&type));
+  EXPECT_EQ(2U, type);
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
+
+  // Check that iteration can begin after an arbitrary location.
+  PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
+  EXPECT_EQ(block2, iter1b.GetNext(&type));
+  EXPECT_EQ(0U, iter1b.GetNext(&type));
+
+  // Ensure nothing has gone noticeably wrong.
+  EXPECT_FALSE(allocator_->IsFull());
+  EXPECT_FALSE(allocator_->IsCorrupt());
+
+  // Check the internal histogram record of used memory.
+  allocator_->UpdateTrackingHistograms();
+  std::unique_ptr<HistogramSamples> used_samples(
+      allocator_->used_histogram_->SnapshotSamples());
+  EXPECT_TRUE(used_samples);
+  EXPECT_EQ(1, used_samples->TotalCount());
+
+  // Check the internal histogram record of allocation requests.
+  std::unique_ptr<HistogramSamples> allocs_samples(
+      allocator_->allocs_histogram_->SnapshotSamples());
+  EXPECT_TRUE(allocs_samples);
+  EXPECT_EQ(2, allocs_samples->TotalCount());
+  EXPECT_EQ(0, allocs_samples->GetCount(0));
+  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject1)));
+  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject2)));
+#if !DCHECK_IS_ON()  // DCHECK builds will die at a NOTREACHED().
+  EXPECT_EQ(0U, allocator_->Allocate(TEST_MEMORY_SIZE + 1, 0));
+  allocs_samples = allocator_->allocs_histogram_->SnapshotSamples();
+  EXPECT_EQ(3, allocs_samples->TotalCount());
+  EXPECT_EQ(1, allocs_samples->GetCount(0));
+#endif
+
+  // Check that an object's type can be changed.
+  EXPECT_EQ(2U, allocator_->GetType(block2));
+  allocator_->ChangeType(block2, 3, 2);
+  EXPECT_EQ(3U, allocator_->GetType(block2));
+  allocator_->ChangeType(block2, 2, 3);
+  EXPECT_EQ(2U, allocator_->GetType(block2));
+
+  // Create second allocator (read/write) using the same memory segment.
+  std::unique_ptr<PersistentMemoryAllocator> allocator2(
+      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
+                                    TEST_MEMORY_PAGE, 0, "", false));
+  EXPECT_EQ(TEST_ID, allocator2->Id());
+  EXPECT_FALSE(allocator2->used_histogram_);
+  EXPECT_FALSE(allocator2->allocs_histogram_);
+  EXPECT_NE(allocator2->allocs_histogram_, allocator_->allocs_histogram_);
+
+  // Ensure that iteration and access through second allocator works.
+  PersistentMemoryAllocator::Iterator iter2(allocator2.get());
+  EXPECT_EQ(block1, iter2.GetNext(&type));
+  EXPECT_EQ(block2, iter2.GetNext(&type));
+  EXPECT_EQ(0U, iter2.GetNext(&type));
+  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));
+
+  // Create a third allocator (read-only) using the same memory segment.
+  std::unique_ptr<const PersistentMemoryAllocator> allocator3(
+      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
+                                    TEST_MEMORY_PAGE, 0, "", true));
+  EXPECT_EQ(TEST_ID, allocator3->Id());
+  EXPECT_FALSE(allocator3->used_histogram_);
+  EXPECT_FALSE(allocator3->allocs_histogram_);
+
+  // Ensure that iteration and access through third allocator works.
+  PersistentMemoryAllocator::Iterator iter3(allocator3.get());
+  EXPECT_EQ(block1, iter3.GetNext(&type));
+  EXPECT_EQ(block2, iter3.GetNext(&type));
+  EXPECT_EQ(0U, iter3.GetNext(&type));
+  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));
+
+  // Ensure that GetNextOfType works.
+  PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
+  EXPECT_EQ(block2, iter1c.GetNextOfType(2));
+  EXPECT_EQ(0U, iter1c.GetNextOfType(2));
+}
+
+// Verifies that allocations never span the internal page size: a request
+// that cannot fit in the remainder of the current page is expected to be
+// placed at the start of the next page.
+TEST_F(PersistentMemoryAllocatorTest, PageTest) {
+  // This allocation will go into the first memory page.
+  Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
+  EXPECT_LT(0U, block1);
+  EXPECT_GT(TEST_MEMORY_PAGE, block1);
+
+  // This allocation won't fit in same page as previous block.
+  Reference block2 =
+      allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
+  EXPECT_EQ(TEST_MEMORY_PAGE, block2);
+
+  // This allocation will also require a new page.
+  Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
+  EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
+}
+
+// A simple thread that takes an allocator and repeatedly allocates random-
+// sized chunks from it until no more can be done.
+class AllocatorThread : public SimpleThread {
+ public:
+  AllocatorThread(const std::string& name,
+                  void* base,
+                  uint32_t size,
+                  uint32_t page_size)
+      : SimpleThread(name, Options()),
+        count_(0),
+        iterable_(0),
+        allocator_(base, size, page_size, 0, std::string(), false) {}
+
+  void Run() override {
+    // Allocate random-sized blocks until the allocator refuses (returns a
+    // zero reference), making roughly half of them iterable along the way.
+    for (;;) {
+      uint32_t size = RandInt(1, 99);
+      uint32_t type = RandInt(100, 999);
+      Reference block = allocator_.Allocate(size, type);
+      if (!block)
+        break;
+
+      count_++;
+      if (RandInt(0, 1)) {
+        allocator_.MakeIterable(block);
+        iterable_++;
+      }
+    }
+  }
+
+  unsigned iterable() { return iterable_; }
+  unsigned count() { return count_; }
+
+ private:
+  unsigned count_;     // Total successful allocations made by this thread.
+  unsigned iterable_;  // Subset of |count_| that was made iterable.
+  PersistentMemoryAllocator allocator_;
+};
+
+// Test parallel allocation/iteration and ensure consistency across all
+// instances.
+TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
+  void* memory = mem_segment_.get();
+  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  // Iterables can only ever be appended, so the count observed through this
+  // allocator must grow monotonically while the worker threads run.
+  unsigned last_count = 0;
+  do {
+    unsigned count = CountIterables();
+    EXPECT_LE(last_count, count);
+    last_count = count;  // Remember the count so the check is not vacuous.
+  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  // After all workers finish, their iterable totals must match what a full
+  // iteration of the shared segment reports.
+  EXPECT_FALSE(allocator_->IsCorrupt());
+  EXPECT_TRUE(allocator_->IsFull());
+  EXPECT_EQ(CountIterables(),
+            t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
+            t5.iterable());
+}
+
+// A simple thread that counts objects by iterating through an allocator.
+class CounterThread : public SimpleThread {
+ public:
+  CounterThread(const std::string& name,
+                PersistentMemoryAllocator::Iterator* iterator,
+                Lock* lock,
+                ConditionVariable* condition,
+                bool* wake_up)
+      : SimpleThread(name, Options()),
+        iterator_(iterator),
+        lock_(lock),
+        condition_(condition),
+        count_(0),
+        wake_up_(wake_up) {}
+
+  void Run() override {
+    // Wait so all threads can start at approximately the same time.
+    // Best performance comes from releasing a single worker which then
+    // releases the next, etc., etc.
+    {
+      AutoLock autolock(*lock_);
+
+      // Before calling Wait(), make sure that the wake up condition
+      // has not already passed.  Also, since spurious signal events
+      // are possible, check the condition in a while loop to make
+      // sure that the wake up condition is met when this thread
+      // returns from the Wait().
+      // See usage comments in src/base/synchronization/condition_variable.h.
+      while (!*wake_up_) {
+        condition_->Wait();
+        condition_->Signal();
+      }
+    }
+
+    uint32_t type;
+    while (iterator_->GetNext(&type) != 0) {
+      ++count_;
+    }
+  }
+
+  unsigned count() { return count_; }
+
+ private:
+  PersistentMemoryAllocator::Iterator* iterator_;
+  Lock* lock_;
+  ConditionVariable* condition_;
+  unsigned count_;
+  bool* wake_up_;
+
+  DISALLOW_COPY_AND_ASSIGN(CounterThread);
+};
+
+// Ensure that parallel iteration returns the same number of objects as
+// single-threaded iteration.
+TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
+  // Fill the memory segment with random allocations.
+  unsigned iterable_count = 0;
+  for (;;) {
+    uint32_t size = RandInt(1, 99);
+    uint32_t type = RandInt(100, 999);
+    Reference block = allocator_->Allocate(size, type);
+    if (!block)
+      break;
+    allocator_->MakeIterable(block);
+    ++iterable_count;
+  }
+  EXPECT_FALSE(allocator_->IsCorrupt());
+  EXPECT_TRUE(allocator_->IsFull());
+  EXPECT_EQ(iterable_count, CountIterables());
+
+  PersistentMemoryAllocator::Iterator iter(allocator_.get());
+  Lock lock;
+  ConditionVariable condition(&lock);
+  bool wake_up = false;
+
+  CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
+  CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
+  CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
+  CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
+  CounterThread t5("t5", &iter, &lock, &condition, &wake_up);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  // Take the lock and set the wake up condition to true.  This helps to
+  // avoid a race condition where the Signal() event is called before
+  // all the threads have reached the Wait() and thus never get woken up.
+  {
+    AutoLock autolock(lock);
+    wake_up = true;
+  }
+
+  // This will release all the waiting threads.
+  condition.Signal();
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  EXPECT_EQ(iterable_count,
+            t1.count() + t2.count() + t3.count() + t4.count() + t5.count());
+
+#if 0
+  // These ensure that the threads don't run sequentially. It shouldn't be
+  // enabled in general because it could lead to a flaky test if it happens
+  // simply by chance but it is useful during development to ensure that the
+  // test is working correctly.
+  EXPECT_NE(iterable_count, t1.count());
+  EXPECT_NE(iterable_count, t2.count());
+  EXPECT_NE(iterable_count, t3.count());
+  EXPECT_NE(iterable_count, t4.count());
+  EXPECT_NE(iterable_count, t5.count());
+#endif
+}
+
+// This test doesn't verify anything other than it doesn't crash. Its goal
+// is to find coding errors that aren't otherwise tested for, much like a
+// "fuzzer" would.
+// This test is supposed to fail on TSAN bot (crbug.com/579867).
+#if defined(THREAD_SANITIZER)
+#define MAYBE_CorruptionTest DISABLED_CorruptionTest
+#else
+#define MAYBE_CorruptionTest CorruptionTest
+#endif
+TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
+  char* memory = mem_segment_.get();
+  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  do {
+    size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
+    char value = RandInt(0, 255);
+    memory[offset] = value;
+  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  CountIterables();
+}
+
+// Attempt to cause crashes or loops by expressly creating dangerous conditions.
+TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
+  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
+  Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
+  Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
+  Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
+  Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
+  allocator_->MakeIterable(block1);
+  allocator_->MakeIterable(block2);
+  allocator_->MakeIterable(block3);
+  allocator_->MakeIterable(block4);
+  allocator_->MakeIterable(block5);
+  EXPECT_EQ(5U, CountIterables());
+  EXPECT_FALSE(allocator_->IsCorrupt());
+
+  // Create loop in iterable list and ensure it doesn't hang. The return value
+  // from CountIterables() in these cases is unpredictable. If there is a
+  // failure, the call will hang and the test killed for taking too long.
+  uint32_t* header4 = (uint32_t*)(mem_segment_.get() + block4);
+  EXPECT_EQ(block5, header4[3]);
+  header4[3] = block4;
+  CountIterables();  // loop: 1-2-3-4-4
+  EXPECT_TRUE(allocator_->IsCorrupt());
+
+  // Test where loop goes back to previous block.
+  header4[3] = block3;
+  CountIterables();  // loop: 1-2-3-4-3
+
+  // Test where loop goes back to the beginning.
+  header4[3] = block1;
+  CountIterables();  // loop: 1-2-3-4-1
+}
+
+
+//----- LocalPersistentMemoryAllocator -----------------------------------------
+
+TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
+  LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
+  EXPECT_EQ(42U, allocator.Id());
+  EXPECT_NE(0U, allocator.Allocate(24, 1));
+  EXPECT_FALSE(allocator.IsFull());
+  EXPECT_FALSE(allocator.IsCorrupt());
+}
+
+
+//----- SharedPersistentMemoryAllocator ----------------------------------------
+
+TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
+  SharedMemoryHandle shared_handle_1;
+  SharedMemoryHandle shared_handle_2;
+
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  Reference r123, r456, r789;
+  {
+    std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
+    ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
+    SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
+                                          false);
+    EXPECT_FALSE(local.IsReadonly());
+    r123 = local.Allocate(123, 123);
+    r456 = local.Allocate(456, 456);
+    r789 = local.Allocate(789, 789);
+    local.MakeIterable(r123);
+    local.ChangeType(r456, 654, 456);
+    local.MakeIterable(r789);
+    local.GetMemoryInfo(&meminfo1);
+    EXPECT_FALSE(local.IsFull());
+    EXPECT_FALSE(local.IsCorrupt());
+
+    ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
+                                                      &shared_handle_1));
+    ASSERT_TRUE(local.shared_memory()->ShareToProcess(GetCurrentProcessHandle(),
+                                                      &shared_handle_2));
+  }
+
+  // Read-only test.
+  std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
+                                                        /*readonly=*/true));
+  ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
+
+  SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
+  EXPECT_TRUE(shalloc2.IsReadonly());
+  EXPECT_EQ(TEST_ID, shalloc2.Id());
+  EXPECT_FALSE(shalloc2.IsFull());
+  EXPECT_FALSE(shalloc2.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter2(&shalloc2);
+  uint32_t type;
+  EXPECT_EQ(r123, iter2.GetNext(&type));
+  EXPECT_EQ(r789, iter2.GetNext(&type));
+  EXPECT_EQ(0U, iter2.GetNext(&type));
+
+  EXPECT_EQ(123U, shalloc2.GetType(r123));
+  EXPECT_EQ(654U, shalloc2.GetType(r456));
+  EXPECT_EQ(789U, shalloc2.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  shalloc2.GetMemoryInfo(&meminfo2);
+  EXPECT_EQ(meminfo1.total, meminfo2.total);
+  EXPECT_EQ(meminfo1.free, meminfo2.free);
+
+  // Read/write test.
+  std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
+                                                        /*readonly=*/false));
+  ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
+
+  SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
+  EXPECT_FALSE(shalloc3.IsReadonly());
+  EXPECT_EQ(TEST_ID, shalloc3.Id());
+  EXPECT_FALSE(shalloc3.IsFull());
+  EXPECT_FALSE(shalloc3.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter3(&shalloc3);
+  EXPECT_EQ(r123, iter3.GetNext(&type));
+  EXPECT_EQ(r789, iter3.GetNext(&type));
+  EXPECT_EQ(0U, iter3.GetNext(&type));
+
+  EXPECT_EQ(123U, shalloc3.GetType(r123));
+  EXPECT_EQ(654U, shalloc3.GetType(r456));
+  EXPECT_EQ(789U, shalloc3.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo3;
+  shalloc3.GetMemoryInfo(&meminfo3);
+  EXPECT_EQ(meminfo1.total, meminfo3.total);
+  EXPECT_EQ(meminfo1.free, meminfo3.free);
+
+  // Interconnectivity test.
+  Reference obj = shalloc3.Allocate(42, 42);
+  ASSERT_TRUE(obj);
+  shalloc3.MakeIterable(obj);
+  EXPECT_EQ(obj, iter2.GetNext(&type));
+  EXPECT_EQ(42U, type);
+}
+
+
+#if !defined(OS_NACL)
+//----- FilePersistentMemoryAllocator ------------------------------------------
+
+TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("persistent_memory");
+
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  Reference r123, r456, r789;
+  {
+    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+    EXPECT_FALSE(local.IsReadonly());
+    r123 = local.Allocate(123, 123);
+    r456 = local.Allocate(456, 456);
+    r789 = local.Allocate(789, 789);
+    local.MakeIterable(r123);
+    local.ChangeType(r456, 654, 456);
+    local.MakeIterable(r789);
+    local.GetMemoryInfo(&meminfo1);
+    EXPECT_FALSE(local.IsFull());
+    EXPECT_FALSE(local.IsCorrupt());
+
+    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+    ASSERT_TRUE(writer.IsValid());
+    writer.Write(0, (const char*)local.data(), local.used());
+  }
+
+  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+  mmfile->Initialize(file_path);
+  EXPECT_TRUE(mmfile->IsValid());
+  const size_t mmlength = mmfile->length();
+  EXPECT_GE(meminfo1.total, mmlength);
+
+  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
+  EXPECT_TRUE(file.IsReadonly());
+  EXPECT_EQ(TEST_ID, file.Id());
+  EXPECT_FALSE(file.IsFull());
+  EXPECT_FALSE(file.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter(&file);
+  uint32_t type;
+  EXPECT_EQ(r123, iter.GetNext(&type));
+  EXPECT_EQ(r789, iter.GetNext(&type));
+  EXPECT_EQ(0U, iter.GetNext(&type));
+
+  EXPECT_EQ(123U, file.GetType(r123));
+  EXPECT_EQ(654U, file.GetType(r456));
+  EXPECT_EQ(789U, file.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  file.GetMemoryInfo(&meminfo2);
+  EXPECT_GE(meminfo1.total, meminfo2.total);
+  EXPECT_GE(meminfo1.free, meminfo2.free);
+  EXPECT_EQ(mmlength, meminfo2.total);
+  EXPECT_EQ(0U, meminfo2.free);
+}
+
+TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("extend_test");
+  MemoryMappedFile::Region region = {0, 16 << 10};  // 16KiB maximum size.
+
+  // Start with a small but valid file of persistent data.
+  ASSERT_FALSE(PathExists(file_path));
+  {
+    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+    local.Allocate(1, 1);
+    local.Allocate(11, 11);
+
+    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+    ASSERT_TRUE(writer.IsValid());
+    writer.Write(0, (const char*)local.data(), local.used());
+  }
+  ASSERT_TRUE(PathExists(file_path));
+  int64_t before_size;
+  ASSERT_TRUE(GetFileSize(file_path, &before_size));
+
+  // Map it as an extendable read/write file and append to it.
+  {
+    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+    mmfile->Initialize(
+        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+        region, MemoryMappedFile::READ_WRITE_EXTEND);
+    FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
+                                            "", false);
+    EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());
+
+    allocator.Allocate(111, 111);
+    EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
+  }
+
+  // Validate that append worked.
+  int64_t after_size;
+  ASSERT_TRUE(GetFileSize(file_path, &after_size));
+  EXPECT_LT(before_size, after_size);
+
+  // Verify that it's still an acceptable file.
+  {
+    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+    mmfile->Initialize(
+        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
+        region, MemoryMappedFile::READ_WRITE_EXTEND);
+    EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
+    EXPECT_TRUE(
+        FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
+  }
+}
+
+TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
+  const uint32_t kAllocAlignment =
+      PersistentMemoryAllocatorTest::GetAllocAlignment();
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+  local.MakeIterable(local.Allocate(1, 1));
+  local.MakeIterable(local.Allocate(11, 11));
+  const size_t minsize = local.used();
+  std::unique_ptr<char[]> garbage(new char[minsize]);
+  RandBytes(garbage.get(), minsize);
+
+  std::unique_ptr<MemoryMappedFile> mmfile;
+  char filename[100];
+  for (size_t filesize = minsize; filesize > 0; --filesize) {
+    strings::SafeSPrintf(filename, "memory_%d_A", filesize);
+    FilePath file_path = temp_dir.path().AppendASCII(filename);
+    ASSERT_FALSE(PathExists(file_path));
+    {
+      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+      ASSERT_TRUE(writer.IsValid());
+      writer.Write(0, (const char*)local.data(), filesize);
+    }
+    ASSERT_TRUE(PathExists(file_path));
+
+    // Request read/write access for some sizes that are a multiple of the
+    // allocator's alignment size. The allocator is strict about file size
+    // being a multiple of its internal alignment when doing read/write access.
+    const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
+    const uint32_t file_flags =
+        File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
+    const MemoryMappedFile::Access map_access =
+        read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;
+
+    mmfile.reset(new MemoryMappedFile());
+    mmfile->Initialize(File(file_path, file_flags), map_access);
+    EXPECT_EQ(filesize, mmfile->length());
+    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
+      // Make sure construction doesn't crash. It will, however, cause
+      // error messages warning about a corrupted memory segment.
+      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+                                              read_only);
+      // Also make sure that iteration doesn't crash.
+      PersistentMemoryAllocator::Iterator iter(&allocator);
+      uint32_t type_id;
+      Reference ref;
+      while ((ref = iter.GetNext(&type_id)) != 0) {
+        const char* data = allocator.GetAsObject<char>(ref, 0);
+        uint32_t type = allocator.GetType(ref);
+        size_t size = allocator.GetAllocSize(ref);
+        // Ensure compiler can't optimize-out above variables.
+        (void)data;
+        (void)type;
+        (void)size;
+      }
+
+      // Ensure that short files are detected as corrupt and full files are not.
+      EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
+    } else {
+      // For filesize >= minsize, the file must be acceptable. This
+      // else clause (file-not-acceptable) should be reached only if
+      // filesize < minsize.
+      EXPECT_LT(filesize, minsize);
+    }
+
+    strings::SafeSPrintf(filename, "memory_%d_B", filesize);
+    file_path = temp_dir.path().AppendASCII(filename);
+    ASSERT_FALSE(PathExists(file_path));
+    {
+      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+      ASSERT_TRUE(writer.IsValid());
+      writer.Write(0, (const char*)garbage.get(), filesize);
+    }
+    ASSERT_TRUE(PathExists(file_path));
+
+    mmfile.reset(new MemoryMappedFile());
+    mmfile->Initialize(File(file_path, file_flags), map_access);
+    EXPECT_EQ(filesize, mmfile->length());
+    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
+      // Make sure construction doesn't crash. It will, however, cause
+      // error messages warning about a corrupted memory segment.
+      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
+                                              read_only);
+      EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
+    } else {
+      // For filesize >= minsize, the file must be acceptable. This
+      // else clause (file-not-acceptable) should be reached only if
+      // filesize < minsize.
+      EXPECT_GT(minsize, filesize);
+    }
+  }
+}
+#endif  // !defined(OS_NACL)
+
+}  // namespace base
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
new file mode 100644
index 0000000..15f83cd
--- /dev/null
+++ b/base/metrics/persistent_sample_map.cc
@@ -0,0 +1,289 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+namespace {
+
+// An iterator for going through a PersistentSampleMap. The logic here is
+// identical to that of SampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class PersistentSampleMapIterator : public SampleCountIterator {
+ public:
+  typedef std::map<HistogramBase::Sample, HistogramBase::Count*>
+      SampleToCountMap;
+
+  explicit PersistentSampleMapIterator(const SampleToCountMap& sample_counts);
+  ~PersistentSampleMapIterator() override;
+
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           HistogramBase::Sample* max,
+           HistogramBase::Count* count) const override;
+
+ private:
+  void SkipEmptyBuckets();
+
+  SampleToCountMap::const_iterator iter_;
+  const SampleToCountMap::const_iterator end_;
+};
+
+PersistentSampleMapIterator::PersistentSampleMapIterator(
+    const SampleToCountMap& sample_counts)
+    : iter_(sample_counts.begin()),
+      end_(sample_counts.end()) {
+  SkipEmptyBuckets();
+}
+
+PersistentSampleMapIterator::~PersistentSampleMapIterator() {}
+
+bool PersistentSampleMapIterator::Done() const {
+  return iter_ == end_;
+}
+
+void PersistentSampleMapIterator::Next() {
+  DCHECK(!Done());
+  ++iter_;
+  SkipEmptyBuckets();
+}
+
+void PersistentSampleMapIterator::Get(Sample* min,
+                                      Sample* max,
+                                      Count* count) const {
+  DCHECK(!Done());
+  if (min)
+    *min = iter_->first;
+  if (max)
+    *max = iter_->first + 1;
+  if (count)
+    *count = *iter_->second;
+}
+
+void PersistentSampleMapIterator::SkipEmptyBuckets() {
+  while (!Done() && *iter_->second == 0) {
+    ++iter_;
+  }
+}
+
+// This structure holds an entry for a PersistentSampleMap within a persistent
+// memory allocator. The "id" must be unique across all maps held by an
+// allocator or they will get attached to the wrong sample map.
+struct SampleRecord {
+  uint64_t id;   // Unique identifier of owner.
+  Sample value;  // The value for which this record holds a count.
+  Count count;   // The count associated with the above value.
+};
+
+// The type-id used to identify sample records inside an allocator.
+const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1;  // SHA1(SampleRecord) v1
+
+}  // namespace
+
+PersistentSampleMap::PersistentSampleMap(
+    uint64_t id,
+    PersistentHistogramAllocator* allocator,
+    Metadata* meta)
+    : HistogramSamples(id, meta), allocator_(allocator) {}
+
+PersistentSampleMap::~PersistentSampleMap() {
+  if (records_)
+    records_->Release(this);
+}
+
+void PersistentSampleMap::Accumulate(Sample value, Count count) {
+  *GetOrCreateSampleCountStorage(value) += count;
+  IncreaseSum(static_cast<int64_t>(count) * value);
+  IncreaseRedundantCount(count);
+}
+
+Count PersistentSampleMap::GetCount(Sample value) const {
+  // Have to override "const" to make sure all samples have been loaded before
+  // being able to know what value to return.
+  Count* count_pointer =
+      const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
+  return count_pointer ? *count_pointer : 0;
+}
+
+Count PersistentSampleMap::TotalCount() const {
+  // Have to override "const" in order to make sure all samples have been
+  // loaded before trying to iterate over the map.
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+
+  Count count = 0;
+  for (const auto& entry : sample_counts_) {
+    count += *entry.second;
+  }
+  return count;
+}
+
+std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
+  // Have to override "const" in order to make sure all samples have been
+  // loaded before trying to iterate over the map.
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+  return WrapUnique(new PersistentSampleMapIterator(sample_counts_));
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::GetNextPersistentRecord(
+    PersistentMemoryAllocator::Iterator& iterator,
+    uint64_t* sample_map_id) {
+  PersistentMemoryAllocator::Reference ref =
+      iterator.GetNextOfType(kTypeIdSampleRecord);
+  const SampleRecord* record =
+      iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+  if (!record)
+    return 0;
+
+  *sample_map_id = record->id;
+  return ref;
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::CreatePersistentRecord(
+    PersistentMemoryAllocator* allocator,
+    uint64_t sample_map_id,
+    Sample value) {
+  PersistentMemoryAllocator::Reference ref =
+      allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
+  SampleRecord* record =
+      allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+
+  if (!record) {
+    NOTREACHED() << "full=" << allocator->IsFull()
+                 << ", corrupt=" << allocator->IsCorrupt();
+    return 0;
+  }
+
+  record->id = sample_map_id;
+  record->value = value;
+  record->count = 0;
+  allocator->MakeIterable(ref);
+  return ref;
+}
+
+bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
+                                          Operator op) {
+  Sample min;
+  Sample max;
+  Count count;
+  for (; !iter->Done(); iter->Next()) {
+    iter->Get(&min, &max, &count);
+    if (min + 1 != max)
+      return false;  // SparseHistogram only supports bucket with size 1.
+
+    *GetOrCreateSampleCountStorage(min) +=
+        (op == HistogramSamples::ADD) ? count : -count;
+  }
+  return true;
+}
+
+Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
+  // If |value| is already in the map, just return that.
+  auto it = sample_counts_.find(value);
+  if (it != sample_counts_.end())
+    return it->second;
+
+  // Import any new samples from persistent memory looking for the value.
+  return ImportSamples(value, false);
+}
+
+Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
+  // Get any existing count storage.
+  Count* count_pointer = GetSampleCountStorage(value);
+  if (count_pointer)
+    return count_pointer;
+
+  // Create a new record in persistent memory for the value. |records_| will
+  // have been initialized by the GetSampleCountStorage() call above.
+  DCHECK(records_);
+  PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
+  if (!ref) {
+    // If a new record could not be created then the underlying allocator is
+    // full or corrupt. Instead, allocate the counter from the heap. This
+    // sample will not be persistent, will not be shared, and will leak...
+    // but it's better than crashing.
+    count_pointer = new Count(0);
+    sample_counts_[value] = count_pointer;
+    return count_pointer;
+  }
+
+  // A race condition between two independent processes (i.e. two independent
+  // histogram objects sharing the same sample data) could cause two of the
+  // above records to be created. The allocator, however, forces a strict
+  // ordering on iterable objects so use the import method to actually add the
+  // just-created record. This ensures that all PersistentSampleMap objects
+  // will always use the same record, whichever was first made iterable.
+  // Thread-safety within a process where multiple threads use the same
+  // histogram object is delegated to the controlling histogram object which,
+  // for sparse histograms, is a lock object.
+  count_pointer = ImportSamples(value, false);
+  DCHECK(count_pointer);
+  return count_pointer;
+}
+
+PersistentSampleMapRecords* PersistentSampleMap::GetRecords() {
+  // The |records_| pointer is lazily fetched from the |allocator_| only on
+  // first use. Sometimes duplicate histograms are created by race conditions
+  // and if both were to grab the records object, there would be a conflict.
+  // Use of a histogram, and thus a call to this method, won't occur until
+  // after the histogram has been de-dup'd.
+  if (!records_)
+    records_ = allocator_->UseSampleMapRecords(id(), this);
+  return records_;
+}
+
+Count* PersistentSampleMap::ImportSamples(Sample until_value,
+                                          bool import_everything) {
+  Count* found_count = nullptr;
+  PersistentMemoryAllocator::Reference ref;
+  PersistentSampleMapRecords* records = GetRecords();
+  while ((ref = records->GetNext()) != 0) {
+    SampleRecord* record =
+        records->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+    if (!record)
+      continue;
+
+    DCHECK_EQ(id(), record->id);
+
+    // Check if the record's value is already known.
+    if (!ContainsKey(sample_counts_, record->value)) {
+      // No: Add it to map of known values.
+      sample_counts_[record->value] = &record->count;
+    } else {
+      // Yes: Ignore it; it's a duplicate caused by a race condition -- see
+      // code & comment in GetOrCreateSampleCountStorage() for details.
+      // Check that nothing ever operated on the duplicate record.
+      DCHECK_EQ(0, record->count);
+    }
+
+    // Check if it's the value being searched for and, if so, keep a pointer
+    // to return later. Stop here unless everything is being imported.
+    // Because race conditions can cause multiple records for a single value,
+    // be sure to return the first one found.
+    if (record->value == until_value) {
+      if (!found_count)
+        found_count = &record->count;
+      if (!import_everything)
+        break;
+    }
+  }
+
+  return found_count;
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
new file mode 100644
index 0000000..3c175db
--- /dev/null
+++ b/base/metrics/persistent_sample_map.h
@@ -0,0 +1,110 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PersistentSampleMap implements HistogramSamples interface. It is used
+// by the SparseHistogram class to store samples in persistent memory which
+// allows it to be shared between processes or live across restarts.
+
+#ifndef BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+#define BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_memory_allocator.h"
+
+namespace base {
+
+class PersistentHistogramAllocator;
+class PersistentSampleMapRecords;
+class PersistentSparseHistogramDataManager;
+
+// The logic here is similar to that of SampleMap but with different data
+// structures. Changes here likely need to be duplicated there.
+class BASE_EXPORT PersistentSampleMap : public HistogramSamples {
+ public:
+  // Constructs a persistent sample map using a PersistentHistogramAllocator
+  // as the data source for persistent records.
+  PersistentSampleMap(uint64_t id,
+                      PersistentHistogramAllocator* allocator,
+                      Metadata* meta);
+
+  ~PersistentSampleMap() override;
+
+  // HistogramSamples:
+  void Accumulate(HistogramBase::Sample value,
+                  HistogramBase::Count count) override;
+  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+  HistogramBase::Count TotalCount() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
+
+  // Uses a persistent-memory |iterator| to locate and return information about
+  // the next record holding information for a PersistentSampleMap. The record
+  // could be for any Map so return the |sample_map_id| as well.
+  static PersistentMemoryAllocator::Reference GetNextPersistentRecord(
+      PersistentMemoryAllocator::Iterator& iterator,
+      uint64_t* sample_map_id);
+
+  // Creates a new record in an |allocator| storing count information for a
+  // specific sample |value| of a histogram with the given |sample_map_id|.
+  static PersistentMemoryAllocator::Reference CreatePersistentRecord(
+      PersistentMemoryAllocator* allocator,
+      uint64_t sample_map_id,
+      HistogramBase::Sample value);
+
+ protected:
+  // Performs arithmetic. |op| is ADD or SUBTRACT.
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
+
+  // Gets a pointer to a "count" corresponding to a given |value|. Returns NULL
+  // if sample does not exist.
+  HistogramBase::Count* GetSampleCountStorage(HistogramBase::Sample value);
+
+  // Gets a pointer to a "count" corresponding to a given |value|, creating
+  // the sample (initialized to zero) if it does not already exist.
+  HistogramBase::Count* GetOrCreateSampleCountStorage(
+      HistogramBase::Sample value);
+
+ private:
+  // Gets the object that manages persistent records. This returns the
+  // |records_| member after first initializing it if necessary.
+  PersistentSampleMapRecords* GetRecords();
+
+  // Imports samples from persistent memory by iterating over all sample
+  // records found therein, adding them to the sample_counts_ map. If a
+  // count for the sample |until_value| is found, stop the import and return
+  // a pointer to that counter. If that value is not found, null will be
+  // returned after all currently available samples have been loaded. Pass
+  // true for |import_everything| to force the importing of all available
+  // samples even if a match is found.
+  HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value,
+                                      bool import_everything);
+
+  // All created/loaded sample values and their associated counts. The storage
+  // for the actual Count numbers is owned by the |records_| object and its
+  // underlying allocator.
+  std::map<HistogramBase::Sample, HistogramBase::Count*> sample_counts_;
+
+  // The allocator that manages histograms inside persistent memory. This is
+  // owned externally and is expected to live beyond the life of this object.
+  PersistentHistogramAllocator* allocator_;
+
+  // The object that manages sample records inside persistent memory. This is
+  // owned by the |allocator_| object (above) and so, like it, is expected to
+  // live beyond the life of this object. This value is lazily-initialized on
+  // first use via the GetRecords() accessor method.
+  PersistentSampleMapRecords* records_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
new file mode 100644
index 0000000..beb72e5
--- /dev/null
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -0,0 +1,263 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+std::unique_ptr<PersistentHistogramAllocator> CreateHistogramAllocator(
+    size_t bytes) {
+  return WrapUnique(new PersistentHistogramAllocator(
+      WrapUnique(new LocalPersistentMemoryAllocator(bytes, 0, ""))));
+}
+
+std::unique_ptr<PersistentHistogramAllocator> DuplicateHistogramAllocator(
+    PersistentHistogramAllocator* original) {
+  return WrapUnique(
+      new PersistentHistogramAllocator(WrapUnique(new PersistentMemoryAllocator(
+          const_cast<void*>(original->data()), original->length(), 0,
+          original->Id(), original->Name(), false))));
+}
+
+TEST(PersistentSampleMapTest, AccumulateTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(1, -200);
+  EXPECT_EQ(-100, samples.GetCount(1));
+  EXPECT_EQ(200, samples.GetCount(2));
+
+  EXPECT_EQ(300, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  samples.Accumulate(250000000, 100);
+  samples.Accumulate(500000000, 200);
+  samples.Accumulate(250000000, -200);
+  EXPECT_EQ(-100, samples.GetCount(250000000));
+  EXPECT_EQ(200, samples.GetCount(500000000));
+
+  EXPECT_EQ(75000000000LL, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, AddSubtractTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta1;
+  PersistentSampleMap samples1(1, allocator1.get(), &meta1);
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 100);
+  samples1.Accumulate(3, 100);
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  HistogramSamples::Metadata meta2;
+  PersistentSampleMap samples2(2, allocator2.get(), &meta2);
+  samples2.Accumulate(1, 200);
+  samples2.Accumulate(2, 200);
+  samples2.Accumulate(4, 200);
+
+  samples1.Add(samples2);
+  EXPECT_EQ(300, samples1.GetCount(1));
+  EXPECT_EQ(300, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(200, samples1.GetCount(4));
+  EXPECT_EQ(2000, samples1.sum());
+  EXPECT_EQ(900, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  samples1.Subtract(samples2);
+  EXPECT_EQ(100, samples1.GetCount(1));
+  EXPECT_EQ(100, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  EXPECT_EQ(600, samples1.sum());
+  EXPECT_EQ(300, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, PersistenceTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta12;
+  PersistentSampleMap samples1(12, allocator1.get(), &meta12);
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 200);
+  samples1.Accumulate(1, -200);
+  samples1.Accumulate(-1, 1);
+  EXPECT_EQ(-100, samples1.GetCount(1));
+  EXPECT_EQ(200, samples1.GetCount(2));
+  EXPECT_EQ(1, samples1.GetCount(-1));
+  EXPECT_EQ(299, samples1.sum());
+  EXPECT_EQ(101, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  PersistentSampleMap samples2(12, allocator2.get(), &meta12);
+  EXPECT_EQ(samples1.id(), samples2.id());
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+  EXPECT_EQ(-100, samples2.GetCount(1));
+  EXPECT_EQ(200, samples2.GetCount(2));
+  EXPECT_EQ(1, samples2.GetCount(-1));
+  EXPECT_EQ(299, samples2.sum());
+  EXPECT_EQ(101, samples2.TotalCount());
+  EXPECT_EQ(samples2.redundant_count(), samples2.TotalCount());
+
+  samples1.Accumulate(-1, -1);
+  EXPECT_EQ(0, samples2.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(3));
+  samples2.Accumulate(3, 300);
+  EXPECT_EQ(300, samples2.GetCount(3));
+  EXPECT_EQ(300, samples1.GetCount(3));
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+
+  EXPECT_EQ(0, samples2.GetCount(4));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  samples1.Accumulate(4, 400);
+  EXPECT_EQ(400, samples2.GetCount(4));
+  EXPECT_EQ(400, samples1.GetCount(4));
+  samples2.Accumulate(4, 4000);
+  EXPECT_EQ(4400, samples2.GetCount(4));
+  EXPECT_EQ(4400, samples1.GetCount(4));
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+}
+
+TEST(PersistentSampleMapIteratorTest, IterateTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(4, -300);
+  samples.Accumulate(5, 0);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(2, max);
+  EXPECT_EQ(100, count);
+  EXPECT_FALSE(it->GetBucketIndex(NULL));
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(2, min);
+  EXPECT_EQ(3, max);
+  EXPECT_EQ(200, count);
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(4, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(-300, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta1;
+  PersistentSampleMap samples1(1, allocator1.get(), &meta1);
+  samples1.Accumulate(5, 1);
+  samples1.Accumulate(10, 2);
+  samples1.Accumulate(15, 3);
+  samples1.Accumulate(20, 4);
+  samples1.Accumulate(25, 5);
+
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  HistogramSamples::Metadata meta2;
+  PersistentSampleMap samples2(2, allocator2.get(), &meta2);
+  samples2.Accumulate(5, 1);
+  samples2.Accumulate(20, 4);
+  samples2.Accumulate(25, 5);
+
+  samples1.Subtract(samples2);
+
+  std::unique_ptr<SampleCountIterator> it = samples1.Iterator();
+  EXPECT_FALSE(it->Done());
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(10, min);
+  EXPECT_EQ(11, max);
+  EXPECT_EQ(2, count);
+
+  it->Next();
+  EXPECT_FALSE(it->Done());
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(15, min);
+  EXPECT_EQ(16, max);
+  EXPECT_EQ(3, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+// Only run this test on builds that support catching a DCHECK crash.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
+
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
+
+  EXPECT_TRUE(it->Done());
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+  EXPECT_DEATH(it->Get(&min, &max, &count), "");
+
+  EXPECT_DEATH(it->Next(), "");
+
+  samples.Accumulate(1, 100);
+  it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+}
+#endif
+// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+}  // namespace
+}  // namespace base
diff --git a/base/metrics/sample_map.cc b/base/metrics/sample_map.cc
index a691243..8abd01e 100644
--- a/base/metrics/sample_map.cc
+++ b/base/metrics/sample_map.cc
@@ -5,57 +5,40 @@
 #include "base/metrics/sample_map.h"
 
 #include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/stl_util.h"
 
 namespace base {
 
 typedef HistogramBase::Count Count;
 typedef HistogramBase::Sample Sample;
 
-SampleMap::SampleMap() : SampleMap(0) {}
+namespace {
 
-SampleMap::SampleMap(uint64_t id) : HistogramSamples(id) {}
+// An iterator for going through a SampleMap. The logic here is identical
+// to that of PersistentSampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class SampleMapIterator : public SampleCountIterator {
+ public:
+  typedef std::map<HistogramBase::Sample, HistogramBase::Count>
+      SampleToCountMap;
 
-SampleMap::~SampleMap() {}
+  explicit SampleMapIterator(const SampleToCountMap& sample_counts);
+  ~SampleMapIterator() override;
 
-void SampleMap::Accumulate(Sample value, Count count) {
-  sample_counts_[value] += count;
-  IncreaseSum(count * value);
-  IncreaseRedundantCount(count);
-}
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           HistogramBase::Sample* max,
+           HistogramBase::Count* count) const override;
 
-Count SampleMap::GetCount(Sample value) const {
-  std::map<Sample, Count>::const_iterator it = sample_counts_.find(value);
-  if (it == sample_counts_.end())
-    return 0;
-  return it->second;
-}
+ private:
+  void SkipEmptyBuckets();
 
-Count SampleMap::TotalCount() const {
-  Count count = 0;
-  for (const auto& entry : sample_counts_) {
-    count += entry.second;
-  }
-  return count;
-}
-
-scoped_ptr<SampleCountIterator> SampleMap::Iterator() const {
-  return scoped_ptr<SampleCountIterator>(new SampleMapIterator(sample_counts_));
-}
-
-bool SampleMap::AddSubtractImpl(SampleCountIterator* iter,
-                                HistogramSamples::Operator op) {
-  Sample min;
-  Sample max;
-  Count count;
-  for (; !iter->Done(); iter->Next()) {
-    iter->Get(&min, &max, &count);
-    if (min + 1 != max)
-      return false;  // SparseHistogram only supports bucket with size 1.
-
-    sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
-  }
-  return true;
-}
+  SampleToCountMap::const_iterator iter_;
+  const SampleToCountMap::const_iterator end_;
+};
 
 SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
     : iter_(sample_counts.begin()),
@@ -77,11 +60,11 @@
 
 void SampleMapIterator::Get(Sample* min, Sample* max, Count* count) const {
   DCHECK(!Done());
-  if (min != NULL)
+  if (min)
     *min = iter_->first;
-  if (max != NULL)
+  if (max)
     *max = iter_->first + 1;
-  if (count != NULL)
+  if (count)
     *count = iter_->second;
 }
 
@@ -91,4 +74,51 @@
   }
 }
 
+}  // namespace
+
+SampleMap::SampleMap() : SampleMap(0) {}
+
+SampleMap::SampleMap(uint64_t id) : HistogramSamples(id) {}
+
+SampleMap::~SampleMap() {}
+
+void SampleMap::Accumulate(Sample value, Count count) {
+  sample_counts_[value] += count;
+  IncreaseSum(static_cast<int64_t>(count) * value);
+  IncreaseRedundantCount(count);
+}
+
+Count SampleMap::GetCount(Sample value) const {
+  std::map<Sample, Count>::const_iterator it = sample_counts_.find(value);
+  if (it == sample_counts_.end())
+    return 0;
+  return it->second;
+}
+
+Count SampleMap::TotalCount() const {
+  Count count = 0;
+  for (const auto& entry : sample_counts_) {
+    count += entry.second;
+  }
+  return count;
+}
+
+std::unique_ptr<SampleCountIterator> SampleMap::Iterator() const {
+  return WrapUnique(new SampleMapIterator(sample_counts_));
+}
+
+bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
+  Sample min;
+  Sample max;
+  Count count;
+  for (; !iter->Done(); iter->Next()) {
+    iter->Get(&min, &max, &count);
+    if (min + 1 != max)
+      return false;  // SparseHistogram only supports bucket with size 1.
+
+    sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
+  }
+  return true;
+}
+
 }  // namespace base
diff --git a/base/metrics/sample_map.h b/base/metrics/sample_map.h
index da536e3..7458e05 100644
--- a/base/metrics/sample_map.h
+++ b/base/metrics/sample_map.h
@@ -11,32 +11,33 @@
 #include <stdint.h>
 
 #include <map>
+#include <memory>
 
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 
 namespace base {
 
+// The logic here is similar to that of PersistentSampleMap but with different
+// data structures. Changes here likely need to be duplicated there.
 class BASE_EXPORT SampleMap : public HistogramSamples {
  public:
   SampleMap();
   explicit SampleMap(uint64_t id);
   ~SampleMap() override;
 
-  // HistogramSamples implementation:
+  // HistogramSamples:
   void Accumulate(HistogramBase::Sample value,
                   HistogramBase::Count count) override;
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
   HistogramBase::Count TotalCount() const override;
-  scoped_ptr<SampleCountIterator> Iterator() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
 
  protected:
-  bool AddSubtractImpl(
-      SampleCountIterator* iter,
-      HistogramSamples::Operator op) override;  // |op| is ADD or SUBTRACT.
+  // Performs arithmetic. |op| is ADD or SUBTRACT.
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
 
  private:
   std::map<HistogramBase::Sample, HistogramBase::Count> sample_counts_;
@@ -44,28 +45,6 @@
   DISALLOW_COPY_AND_ASSIGN(SampleMap);
 };
 
-class BASE_EXPORT SampleMapIterator : public SampleCountIterator {
- public:
-  typedef std::map<HistogramBase::Sample, HistogramBase::Count>
-      SampleToCountMap;
-
-  explicit SampleMapIterator(const SampleToCountMap& sample_counts);
-  ~SampleMapIterator() override;
-
-  // SampleCountIterator implementation:
-  bool Done() const override;
-  void Next() override;
-  void Get(HistogramBase::Sample* min,
-           HistogramBase::Sample* max,
-           HistogramBase::Count* count) const override;
-
- private:
-  void SkipEmptyBuckets();
-
-  SampleToCountMap::const_iterator iter_;
-  const SampleToCountMap::const_iterator end_;
-};
-
 }  // namespace base
 
 #endif  // BASE_METRICS_SAMPLE_MAP_H_
diff --git a/base/metrics/sample_map_unittest.cc b/base/metrics/sample_map_unittest.cc
index c941d65..8f57710 100644
--- a/base/metrics/sample_map_unittest.cc
+++ b/base/metrics/sample_map_unittest.cc
@@ -4,7 +4,8 @@
 
 #include "base/metrics/sample_map.h"
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -24,6 +25,20 @@
   EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
 }
 
+TEST(SampleMapTest, Accumulate_LargeValuesDontOverflow) {
+  SampleMap samples(1);
+
+  samples.Accumulate(250000000, 100);
+  samples.Accumulate(500000000, 200);
+  samples.Accumulate(250000000, -200);
+  EXPECT_EQ(-100, samples.GetCount(250000000));
+  EXPECT_EQ(200, samples.GetCount(500000000));
+
+  EXPECT_EQ(75000000000LL, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
 TEST(SampleMapTest, AddSubtractTest) {
   SampleMap samples1(1);
   SampleMap samples2(2);
@@ -62,7 +77,7 @@
   samples.Accumulate(4, -300);
   samples.Accumulate(5, 0);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   HistogramBase::Sample min;
   HistogramBase::Sample max;
@@ -105,7 +120,7 @@
 
   samples.Subtract(samples2);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
   EXPECT_FALSE(it->Done());
 
   HistogramBase::Sample min;
@@ -134,7 +149,7 @@
 TEST(SampleMapIteratorDeathTest, IterateDoneTest) {
   SampleMap samples(1);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   EXPECT_TRUE(it->Done());
 
diff --git a/base/metrics/sample_vector.cc b/base/metrics/sample_vector.cc
index 6120c50..7b056cb 100644
--- a/base/metrics/sample_vector.cc
+++ b/base/metrics/sample_vector.cc
@@ -26,7 +26,7 @@
 
 SampleVector::SampleVector(uint64_t id,
                            HistogramBase::AtomicCount* counts,
-                           size_t /* counts_size */,
+                           size_t /*counts_size*/,
                            Metadata* meta,
                            const BucketRanges* bucket_ranges)
     : HistogramSamples(id, meta),
@@ -41,9 +41,8 @@
 
 void SampleVector::Accumulate(Sample value, Count count) {
   size_t bucket_index = GetBucketIndex(value);
-  subtle::NoBarrier_Store(&counts_[bucket_index],
-      subtle::NoBarrier_Load(&counts_[bucket_index]) + count);
-  IncreaseSum(count * value);
+  subtle::NoBarrier_AtomicIncrement(&counts_[bucket_index], count);
+  IncreaseSum(static_cast<int64_t>(count) * value);
   IncreaseRedundantCount(count);
 }
 
@@ -65,8 +64,8 @@
   return subtle::NoBarrier_Load(&counts_[bucket_index]);
 }
 
-scoped_ptr<SampleCountIterator> SampleVector::Iterator() const {
-  return scoped_ptr<SampleCountIterator>(
+std::unique_ptr<SampleCountIterator> SampleVector::Iterator() const {
+  return std::unique_ptr<SampleCountIterator>(
       new SampleVectorIterator(counts_, counts_size_, bucket_ranges_));
 }
 
@@ -83,10 +82,8 @@
     if (min == bucket_ranges_->range(index) &&
         max == bucket_ranges_->range(index + 1)) {
       // Sample matches this bucket!
-      HistogramBase::Count old_counts =
-          subtle::NoBarrier_Load(&counts_[index]);
-      subtle::NoBarrier_Store(&counts_[index],
-          old_counts + ((op ==  HistogramSamples::ADD) ? count : -count));
+      subtle::NoBarrier_AtomicIncrement(
+          &counts_[index], op == HistogramSamples::ADD ? count : -count);
       iter->Next();
     } else if (min > bucket_ranges_->range(index)) {
       // Sample is larger than current bucket range. Try next.
diff --git a/base/metrics/sample_vector.h b/base/metrics/sample_vector.h
index 0317869..ee26c52 100644
--- a/base/metrics/sample_vector.h
+++ b/base/metrics/sample_vector.h
@@ -11,12 +11,12 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/compiler_specific.h"
 #include "base/gtest_prod_util.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 
@@ -40,7 +40,7 @@
                   HistogramBase::Count count) override;
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
   HistogramBase::Count TotalCount() const override;
-  scoped_ptr<SampleCountIterator> Iterator() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
 
   // Get count of a specific bucket.
   HistogramBase::Count GetCountAtIndex(size_t bucket_index) const;
@@ -54,6 +54,7 @@
 
  private:
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
+  FRIEND_TEST_ALL_PREFIXES(SharedHistogramTest, CorruptSampleCounts);
 
   // In the case where this class manages the memory, here it is.
   std::vector<HistogramBase::AtomicCount> local_counts_;
diff --git a/base/metrics/sample_vector_unittest.cc b/base/metrics/sample_vector_unittest.cc
index 744cbfa..02e48aa 100644
--- a/base/metrics/sample_vector_unittest.cc
+++ b/base/metrics/sample_vector_unittest.cc
@@ -7,9 +7,9 @@
 #include <limits.h>
 #include <stddef.h>
 
+#include <memory>
 #include <vector>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -44,6 +44,33 @@
   EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
 }
 
+TEST(SampleVectorTest, Accumulate_LargeValuesDontOverflow) {
+  // Custom buckets: [1, 250000000) [250000000, 500000000)
+  BucketRanges ranges(3);
+  ranges.set_range(0, 1);
+  ranges.set_range(1, 250000000);
+  ranges.set_range(2, 500000000);
+  SampleVector samples(1, &ranges);
+
+  samples.Accumulate(240000000, 200);
+  samples.Accumulate(249999999, -300);
+  EXPECT_EQ(-100, samples.GetCountAtIndex(0));
+
+  samples.Accumulate(250000000, 200);
+  EXPECT_EQ(200, samples.GetCountAtIndex(1));
+
+  EXPECT_EQ(23000000300LL, samples.sum());
+  EXPECT_EQ(100, samples.redundant_count());
+  EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+
+  samples.Accumulate(250000000, -100);
+  EXPECT_EQ(100, samples.GetCountAtIndex(1));
+
+  EXPECT_EQ(-1999999700LL, samples.sum());
+  EXPECT_EQ(0, samples.redundant_count());
+  EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+}
+
 TEST(SampleVectorTest, AddSubtractTest) {
   // Custom buckets: [0, 1) [1, 2) [2, 3) [3, INT_MAX)
   BucketRanges ranges(5);
@@ -217,7 +244,7 @@
   samples.Accumulate(1, 1);
   samples.Accumulate(2, 2);
   samples.Accumulate(3, 3);
-  scoped_ptr<SampleCountIterator> it2 = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it2 = samples.Iterator();
 
   int i;
   for (i = 1; !it2->Done(); i++, it2->Next()) {
@@ -244,7 +271,7 @@
   ranges.set_range(4, INT_MAX);
   SampleVector samples(1, &ranges);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   EXPECT_TRUE(it->Done());
 
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index 37ea5e7..3c1222d 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -6,7 +6,10 @@
 
 #include <utility>
 
+#include "base/memory/ptr_util.h"
 #include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_sample_map.h"
 #include "base/metrics/sample_map.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
@@ -22,22 +25,67 @@
 HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
                                            int32_t flags) {
   HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-
   if (!histogram) {
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    HistogramBase* tentative_histogram = new SparseHistogram(name);
-    tentative_histogram->SetFlags(flags);
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+    // Try to create the histogram using a "persistent" allocator. As of
+    // 2016-02-25, the availability of such is controlled by a base::Feature
+    // that is off by default. If the allocator doesn't exist or if
+    // allocating from it fails, code below will allocate the histogram from
+    // the process heap.
+    PersistentMemoryAllocator::Reference histogram_ref = 0;
+    std::unique_ptr<HistogramBase> tentative_histogram;
+    PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+    if (allocator) {
+      tentative_histogram = allocator->AllocateHistogram(
+          SPARSE_HISTOGRAM, name, 0, 0, nullptr, flags, &histogram_ref);
+    }
+
+    // Handle the case where no persistent allocator is present or the
+    // persistent allocation fails (perhaps because it is full).
+    if (!tentative_histogram) {
+      DCHECK(!histogram_ref);  // Should never have been set.
+      DCHECK(!allocator);      // Shouldn't have failed.
+      flags &= ~HistogramBase::kIsPersistent;
+      tentative_histogram.reset(new SparseHistogram(name));
+      tentative_histogram->SetFlags(flags);
+    }
+
+    // Register this histogram with the StatisticsRecorder. Keep a copy of
+    // the pointer value to tell later whether the locally created histogram
+    // was registered or deleted. The type is "void" because it could point
+    // to released memory after the following line.
+    const void* tentative_histogram_ptr = tentative_histogram.get();
+    histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+        tentative_histogram.release());
+
+    // Persistent histograms need some follow-up processing.
+    if (histogram_ref) {
+      allocator->FinalizeHistogram(histogram_ref,
+                                   histogram == tentative_histogram_ptr);
+    }
+
+    ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
+  } else {
+    ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
   }
+
   DCHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
   return histogram;
 }
 
+// static
+std::unique_ptr<HistogramBase> SparseHistogram::PersistentCreate(
+    PersistentHistogramAllocator* allocator,
+    const std::string& name,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return WrapUnique(
+      new SparseHistogram(allocator, name, meta, logged_meta));
+}
+
 SparseHistogram::~SparseHistogram() {}
 
 uint64_t SparseHistogram::name_hash() const {
-  return samples_.id();
+  return samples_->id();
 }
 
 HistogramType SparseHistogram::GetHistogramType() const {
@@ -45,9 +93,9 @@
 }
 
 bool SparseHistogram::HasConstructionArguments(
-    Sample /* expected_minimum */,
-    Sample /* expected_maximum */,
-    size_t /* expected_bucket_count */) const {
+    Sample /*expected_minimum*/,
+    Sample /*expected_maximum*/,
+    uint32_t /*expected_bucket_count*/) const {
   // SparseHistogram never has min/max/bucket_count limit.
   return false;
 }
@@ -63,28 +111,54 @@
   }
   {
     base::AutoLock auto_lock(lock_);
-    samples_.Accumulate(value, count);
+    samples_->Accumulate(value, count);
   }
 
   FindAndRunCallback(value);
 }
 
-scoped_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
-  scoped_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
 
   base::AutoLock auto_lock(lock_);
-  snapshot->Add(samples_);
+  snapshot->Add(*samples_);
+  return std::move(snapshot);
+}
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
+  DCHECK(!final_delta_created_);
+
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+  base::AutoLock auto_lock(lock_);
+  snapshot->Add(*samples_);
+
+  // Subtract what was previously logged and update that information.
+  snapshot->Subtract(*logged_samples_);
+  logged_samples_->Add(*snapshot);
+  return std::move(snapshot);
+}
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotFinalDelta() const {
+  DCHECK(!final_delta_created_);
+  final_delta_created_ = true;
+
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+  base::AutoLock auto_lock(lock_);
+  snapshot->Add(*samples_);
+
+  // Subtract what was previously logged and then return.
+  snapshot->Subtract(*logged_samples_);
   return std::move(snapshot);
 }
 
 void SparseHistogram::AddSamples(const HistogramSamples& samples) {
   base::AutoLock auto_lock(lock_);
-  samples_.Add(samples);
+  samples_->Add(samples);
 }
 
 bool SparseHistogram::AddSamplesFromPickle(PickleIterator* iter) {
   base::AutoLock auto_lock(lock_);
-  return samples_.AddFromPickle(iter);
+  return samples_->AddFromPickle(iter);
 }
 
 void SparseHistogram::WriteHTMLGraph(std::string* output) const {
@@ -103,7 +177,28 @@
 
 SparseHistogram::SparseHistogram(const std::string& name)
     : HistogramBase(name),
-      samples_(HashMetricName(name)) {}
+      samples_(new SampleMap(HashMetricName(name))),
+      logged_samples_(new SampleMap(samples_->id())) {}
+
+SparseHistogram::SparseHistogram(PersistentHistogramAllocator* allocator,
+                                 const std::string& name,
+                                 HistogramSamples::Metadata* meta,
+                                 HistogramSamples::Metadata* logged_meta)
+    : HistogramBase(name),
+      // While other histogram types maintain a static vector of values with
+      // sufficient space for both "active" and "logged" samples, with each
+      // SampleVector being given the appropriate half, sparse histograms
+      // have no such initial allocation. Each sample has its own record
+      // attached to a single PersistentSampleMap by a common 64-bit identifier.
+      // Since a sparse histogram has two sample maps (active and logged),
+      // there must be two sets of sample records with different IDs. The
+      // "active" samples use, for convenience purposes, an ID matching
+      // that of the histogram while the "logged" samples use that number
+      // plus 1.
+      samples_(new PersistentSampleMap(HashMetricName(name), allocator, meta)),
+      logged_samples_(
+          new PersistentSampleMap(samples_->id() + 1, allocator, logged_meta)) {
+}
 
 HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
   std::string histogram_name;
@@ -113,19 +208,18 @@
     return NULL;
   }
 
-  DCHECK(flags & HistogramBase::kIPCSerializationSourceFlag);
   flags &= ~HistogramBase::kIPCSerializationSourceFlag;
 
   return SparseHistogram::FactoryGet(histogram_name, flags);
 }
 
-void SparseHistogram::GetParameters(DictionaryValue* /* params */) const {
+void SparseHistogram::GetParameters(DictionaryValue* /*params*/) const {
   // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
 }
 
-void SparseHistogram::GetCountAndBucketData(Count* /* count */,
-                                            int64_t* /* sum */,
-                                            ListValue* /* buckets */) const {
+void SparseHistogram::GetCountAndBucketData(Count* /*count*/,
+                                            int64_t* /*sum*/,
+                                            ListValue* /*buckets*/) const {
   // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
 }
 
@@ -133,7 +227,7 @@
                                      const std::string& newline,
                                      std::string* output) const {
   // Get a local copy of the data so we are consistent.
-  scoped_ptr<HistogramSamples> snapshot = SnapshotSamples();
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
   Count total_count = snapshot->TotalCount();
   double scaled_total_count = total_count / 100.0;
 
@@ -146,7 +240,7 @@
   // normalize the graphical bar-width relative to that sample count.
   Count largest_count = 0;
   Sample largest_sample = 0;
-  scoped_ptr<SampleCountIterator> it = snapshot->Iterator();
+  std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
   while (!it->Done()) {
     Sample min;
     Sample max;
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
index a77c020..3b302d6 100644
--- a/base/metrics/sparse_histogram.h
+++ b/base/metrics/sparse_histogram.h
@@ -9,19 +9,40 @@
 #include <stdint.h>
 
 #include <map>
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/sample_map.h"
 #include "base/synchronization/lock.h"
 
 namespace base {
 
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a large range.
+//
+// The implementation uses a lock and a map, whereas other histogram types use a
+// vector and no lock. It is thus more costly to add values to, and each value
+// stored has more overhead, compared to the other histogram types. However it
+// may be more efficient in memory if the total number of sample values is small
+// compared to the range of their values.
+//
+// UMA_HISTOGRAM_ENUMERATION would be better suited for a smaller range of
+// enumerations that are (nearly) contiguous. Also for code that is expected to
+// run often or in a tight loop.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and/or
+// infrequently recorded values.
+//
+// For instance, Sqlite.Version.* are SPARSE because for any given database,
+// there's going to be exactly one version logged, meaning no gain to having a
+// pre-allocated vector of slots once the fleet gets to version 4 or 5 or 10.
+// Likewise Sqlite.Error.* are SPARSE, because most databases generate few or no
+// errors and there are large gaps in the set of possible errors.
 #define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
     do { \
       base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
@@ -30,6 +51,7 @@
     } while (0)
 
 class HistogramSamples;
+class PersistentHistogramAllocator;
 
 class BASE_EXPORT SparseHistogram : public HistogramBase {
  public:
@@ -37,6 +59,14 @@
   // new one.
   static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
 
+  // Create a histogram using data in persistent storage. The allocator must
+  // live longer than the created sparse histogram.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      PersistentHistogramAllocator* allocator,
+      const std::string& name,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   ~SparseHistogram() override;
 
   // HistogramBase implementation:
@@ -44,12 +74,14 @@
   HistogramType GetHistogramType() const override;
   bool HasConstructionArguments(Sample expected_minimum,
                                 Sample expected_maximum,
-                                size_t expected_bucket_count) const override;
+                                uint32_t expected_bucket_count) const override;
   void Add(Sample value) override;
   void AddCount(Sample value, int count) override;
   void AddSamples(const HistogramSamples& samples) override;
   bool AddSamplesFromPickle(base::PickleIterator* iter) override;
-  scoped_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
   void WriteHTMLGraph(std::string* output) const override;
   void WriteAscii(std::string* output) const override;
 
@@ -61,6 +93,11 @@
   // Clients should always use FactoryGet to create SparseHistogram.
   explicit SparseHistogram(const std::string& name);
 
+  SparseHistogram(PersistentHistogramAllocator* allocator,
+                  const std::string& name,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
       base::PickleIterator* iter);
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
@@ -85,7 +122,11 @@
   // Protects access to |samples_|.
   mutable base::Lock lock_;
 
-  SampleMap samples_;
+  // Flag to indicate if PrepareFinalDelta has been previously called.
+  mutable bool final_delta_created_ = false;
+
+  std::unique_ptr<HistogramSamples> samples_;
+  std::unique_ptr<HistogramSamples> logged_samples_;
 
   DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
 };
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index 83cf5d3..eab7790 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -4,11 +4,13 @@
 
 #include "base/metrics/sparse_histogram.h"
 
+#include <memory>
 #include <string>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
 #include "base/metrics/sample_map.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
@@ -17,71 +19,136 @@
 
 namespace base {
 
-class SparseHistogramTest : public testing::Test {
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class SparseHistogramTest : public testing::TestWithParam<bool> {
  protected:
+  const int32_t kAllocatorMemorySize = 8 << 20;  // 8 MiB
+
+  SparseHistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
   void SetUp() override {
+    if (use_persistent_histogram_allocator_)
+      CreatePersistentMemoryAllocator();
+
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
     InitializeStatisticsRecorder();
   }
 
-  void TearDown() override { UninitializeStatisticsRecorder(); }
+  void TearDown() override {
+    if (allocator_) {
+      ASSERT_FALSE(allocator_->IsFull());
+      ASSERT_FALSE(allocator_->IsCorrupt());
+    }
+    UninitializeStatisticsRecorder();
+    DestroyPersistentMemoryAllocator();
+  }
 
   void InitializeStatisticsRecorder() {
-    statistics_recorder_ = new StatisticsRecorder();
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
   }
 
   void UninitializeStatisticsRecorder() {
-    delete statistics_recorder_;
-    statistics_recorder_ = NULL;
+    statistics_recorder_.reset();
   }
 
-  scoped_ptr<SparseHistogram> NewSparseHistogram(const std::string& name) {
-    return scoped_ptr<SparseHistogram>(new SparseHistogram(name));
+  void CreatePersistentMemoryAllocator() {
+    // By getting the results-histogram before any persistent allocator
+    // is attached, that histogram is guaranteed not to be stored in
+    // any persistent memory segment (which simplifies some tests).
+    GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
+
+    GlobalHistogramAllocator::CreateWithLocalMemory(
+        kAllocatorMemorySize, 0, "SparseHistogramAllocatorTest");
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
   }
 
-  StatisticsRecorder* statistics_recorder_;
+  void DestroyPersistentMemoryAllocator() {
+    allocator_ = nullptr;
+    GlobalHistogramAllocator::ReleaseForTesting();
+  }
+
+  std::unique_ptr<SparseHistogram> NewSparseHistogram(const std::string& name) {
+    return std::unique_ptr<SparseHistogram>(new SparseHistogram(name));
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SparseHistogramTest);
 };
 
-TEST_F(SparseHistogramTest, BasicTest) {
-  scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
-  scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent,
+                        SparseHistogramTest,
+                        testing::Bool());
+
+
+TEST_P(SparseHistogramTest, BasicTest) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
   EXPECT_EQ(0, snapshot->TotalCount());
   EXPECT_EQ(0, snapshot->sum());
 
   histogram->Add(100);
-  scoped_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
   EXPECT_EQ(1, snapshot1->TotalCount());
   EXPECT_EQ(1, snapshot1->GetCount(100));
 
   histogram->Add(100);
   histogram->Add(101);
-  scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
   EXPECT_EQ(3, snapshot2->TotalCount());
   EXPECT_EQ(2, snapshot2->GetCount(100));
   EXPECT_EQ(1, snapshot2->GetCount(101));
 }
 
-TEST_F(SparseHistogramTest, BasicTestAddCount) {
-  scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
-  scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+TEST_P(SparseHistogramTest, BasicTestAddCount) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
   EXPECT_EQ(0, snapshot->TotalCount());
   EXPECT_EQ(0, snapshot->sum());
 
   histogram->AddCount(100, 15);
-  scoped_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
   EXPECT_EQ(15, snapshot1->TotalCount());
   EXPECT_EQ(15, snapshot1->GetCount(100));
 
   histogram->AddCount(100, 15);
   histogram->AddCount(101, 25);
-  scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
   EXPECT_EQ(55, snapshot2->TotalCount());
   EXPECT_EQ(30, snapshot2->GetCount(100));
   EXPECT_EQ(25, snapshot2->GetCount(101));
 }
 
-TEST_F(SparseHistogramTest, MacroBasicTest) {
+TEST_P(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(0, snapshot->TotalCount());
+  EXPECT_EQ(0, snapshot->sum());
+
+  histogram->AddCount(1000000000, 15);
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  EXPECT_EQ(15, snapshot1->TotalCount());
+  EXPECT_EQ(15, snapshot1->GetCount(1000000000));
+
+  histogram->AddCount(1000000000, 15);
+  histogram->AddCount(1010000000, 25);
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(55, snapshot2->TotalCount());
+  EXPECT_EQ(30, snapshot2->GetCount(1000000000));
+  EXPECT_EQ(25, snapshot2->GetCount(1010000000));
+  EXPECT_EQ(55250000000LL, snapshot2->sum());
+}
+
+TEST_P(SparseHistogramTest, MacroBasicTest) {
   UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
   UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 200);
   UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
@@ -94,16 +161,20 @@
 
   EXPECT_EQ(SPARSE_HISTOGRAM, sparse_histogram->GetHistogramType());
   EXPECT_EQ("Sparse", sparse_histogram->histogram_name());
-  EXPECT_EQ(HistogramBase::kUmaTargetedHistogramFlag,
-            sparse_histogram->flags());
+  EXPECT_EQ(
+      HistogramBase::kUmaTargetedHistogramFlag |
+          (use_persistent_histogram_allocator_ ? HistogramBase::kIsPersistent
+                                               : 0),
+      sparse_histogram->flags());
 
-  scoped_ptr<HistogramSamples> samples = sparse_histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples =
+      sparse_histogram->SnapshotSamples();
   EXPECT_EQ(3, samples->TotalCount());
   EXPECT_EQ(2, samples->GetCount(100));
   EXPECT_EQ(1, samples->GetCount(200));
 }
 
-TEST_F(SparseHistogramTest, MacroInLoopTest) {
+TEST_P(SparseHistogramTest, MacroInLoopTest) {
   // Unlike the macros in histogram.h, SparseHistogram macros can have a
   // variable as histogram name.
   for (int i = 0; i < 2; i++) {
@@ -121,8 +192,8 @@
               ("Sparse2" == name1 && "Sparse1" == name2));
 }
 
-TEST_F(SparseHistogramTest, Serialize) {
-  scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+TEST_P(SparseHistogramTest, Serialize) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
   histogram->SetFlags(HistogramBase::kIPCSerializationSourceFlag);
 
   Pickle pickle;
@@ -146,4 +217,113 @@
   EXPECT_FALSE(iter.SkipBytes(1));
 }
 
+// Ensure that race conditions that cause multiple, identical sparse histograms
+// to be created will safely resolve to a single one.
+TEST_P(SparseHistogramTest, DuplicationSafety) {
+  const char histogram_name[] = "Duplicated";
+  size_t histogram_count = StatisticsRecorder::GetHistogramCount();
+
+  // Create a histogram that we will later duplicate.
+  HistogramBase* original =
+      SparseHistogram::FactoryGet(histogram_name, HistogramBase::kNoFlags);
+  ++histogram_count;
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+  original->Add(1);
+
+  // Create a duplicate. This has to happen differently depending on where the
+  // memory is taken from.
+  if (use_persistent_histogram_allocator_) {
+    // To allocate from persistent memory, clear the last_created reference in
+    // the GlobalHistogramAllocator. This will cause an Import to recreate
+    // the just-created histogram which will then be released as a duplicate.
+    GlobalHistogramAllocator::Get()->ClearLastCreatedReferenceForTesting();
+    // Creating a different histogram will first do an Import to ensure it
+    // hasn't been created elsewhere, triggering the duplication and release.
+    SparseHistogram::FactoryGet("something.new", HistogramBase::kNoFlags);
+    ++histogram_count;
+  } else {
+    // To allocate from the heap, just call the (private) constructor directly.
+    // Delete it immediately like would have happened within FactoryGet();
+    std::unique_ptr<SparseHistogram> something =
+        NewSparseHistogram(histogram_name);
+    DCHECK_NE(original, something.get());
+  }
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+
+  // Re-creating the histogram via FactoryGet() will return the same one.
+  HistogramBase* duplicate =
+      SparseHistogram::FactoryGet(histogram_name, HistogramBase::kNoFlags);
+  DCHECK_EQ(original, duplicate);
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+  duplicate->Add(2);
+
+  // Ensure that original histograms are still cross-functional.
+  original->Add(2);
+  duplicate->Add(1);
+  std::unique_ptr<HistogramSamples> snapshot_orig = original->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> snapshot_dup = duplicate->SnapshotSamples();
+  DCHECK_EQ(2, snapshot_orig->GetCount(2));
+  DCHECK_EQ(2, snapshot_dup->GetCount(1));
+}
+
+TEST_P(SparseHistogramTest, FactoryTime) {
+  const int kTestCreateCount = 1 << 10;  // Must be power-of-2.
+  const int kTestLookupCount = 100000;
+  const int kTestAddCount = 100000;
+
+  // Create all histogram names in advance for accurate timing below.
+  std::vector<std::string> histogram_names;
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    histogram_names.push_back(
+        StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+  }
+
+  // Calculate cost of creating histograms.
+  TimeTicks create_start = TimeTicks::Now();
+  for (int i = 0; i < kTestCreateCount; ++i)
+    SparseHistogram::FactoryGet(histogram_names[i], HistogramBase::kNoFlags);
+  TimeDelta create_ticks = TimeTicks::Now() - create_start;
+  int64_t create_ms = create_ticks.InMilliseconds();
+
+  VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+          << "ms or about "
+          << (create_ms * 1000000) / kTestCreateCount
+          << "ns each.";
+
+  // Calculate cost of looking up existing histograms.
+  TimeTicks lookup_start = TimeTicks::Now();
+  for (int i = 0; i < kTestLookupCount; ++i) {
+    // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+    // order less likely to be cacheable (but still hit them all) should the
+    // underlying storage use the exact histogram name as the key.
+    const int i_mult = 6007;
+    static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+    int index = (i * i_mult) & (kTestCreateCount - 1);
+    SparseHistogram::FactoryGet(histogram_names[index],
+                                HistogramBase::kNoFlags);
+  }
+  TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+  int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+  VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+          << "ms or about "
+          << (lookup_ms * 1000000) / kTestLookupCount
+          << "ns each.";
+
+  // Calculate cost of accessing histograms.
+  HistogramBase* histogram =
+      SparseHistogram::FactoryGet(histogram_names[0], HistogramBase::kNoFlags);
+  ASSERT_TRUE(histogram);
+  TimeTicks add_start = TimeTicks::Now();
+  for (int i = 0; i < kTestAddCount; ++i)
+    histogram->Add(i & 127);
+  TimeDelta add_ticks = TimeTicks::Now() - add_start;
+  int64_t add_ms = add_ticks.InMilliseconds();
+
+  VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+          << "ms or about "
+          << (add_ms * 1000000) / kTestAddCount
+          << "ns each.";
+}
+
 }  // namespace base
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index f8257f4..42ed5a9 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -4,25 +4,93 @@
 
 #include "base/metrics/statistics_recorder.h"
 
+#include <memory>
+
 #include "base/at_exit.h"
+#include "base/debug/leak_annotations.h"
 #include "base/json/string_escape.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/lock.h"
 #include "base/values.h"
 
 namespace {
+
 // Initialize histogram statistics gathering system.
 base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
     LAZY_INSTANCE_INITIALIZER;
+
+bool HistogramNameLesser(const base::HistogramBase* a,
+                         const base::HistogramBase* b) {
+  return a->histogram_name() < b->histogram_name();
+}
+
 }  // namespace
 
 namespace base {
 
+StatisticsRecorder::HistogramIterator::HistogramIterator(
+    const HistogramMap::iterator& iter, bool include_persistent)
+    : iter_(iter),
+      include_persistent_(include_persistent) {
+  // The starting location could point to a persistent histogram when such
+  // is not wanted. If so, skip it.
+  if (!include_persistent_ && iter_ != histograms_->end() &&
+      (iter_->second->flags() & HistogramBase::kIsPersistent)) {
+    // This operator will continue to skip until a non-persistent histogram
+    // is found.
+    operator++();
+  }
+}
+
+StatisticsRecorder::HistogramIterator::HistogramIterator(
+    const HistogramIterator& rhs)
+    : iter_(rhs.iter_),
+      include_persistent_(rhs.include_persistent_) {
+}
+
+StatisticsRecorder::HistogramIterator::~HistogramIterator() {}
+
+StatisticsRecorder::HistogramIterator&
+StatisticsRecorder::HistogramIterator::operator++() {
+  const HistogramMap::iterator histograms_end = histograms_->end();
+  if (iter_ == histograms_end || lock_ == NULL)
+    return *this;
+
+  base::AutoLock auto_lock(*lock_);
+
+  for (;;) {
+    ++iter_;
+    if (iter_ == histograms_end)
+      break;
+    if (!include_persistent_ && (iter_->second->flags() &
+                                 HistogramBase::kIsPersistent)) {
+      continue;
+    }
+    break;
+  }
+
+  return *this;
+}
+
+StatisticsRecorder::~StatisticsRecorder() {
+  DCHECK(lock_);
+  DCHECK(histograms_);
+  DCHECK(ranges_);
+
+  // Clean out what this object created and then restore what existed before.
+  Reset();
+  base::AutoLock auto_lock(*lock_);
+  histograms_ = existing_histograms_.release();
+  callbacks_ = existing_callbacks_.release();
+  ranges_ = existing_ranges_.release();
+}
+
 // static
 void StatisticsRecorder::Initialize() {
   // Ensure that an instance of the StatisticsRecorder object is created.
@@ -40,8 +108,13 @@
 // static
 HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
     HistogramBase* histogram) {
-  // As per crbug.com/79322 the histograms are intentionally leaked.
+  // As per crbug.com/79322 the histograms are intentionally leaked, so we need
+  // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
+  // for an object, the duplicates should not be annotated.
+  // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
+  // twice if (lock_ == NULL) || (!histograms_).
   if (lock_ == NULL) {
+    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
     return histogram;
   }
 
@@ -53,10 +126,12 @@
       histogram_to_return = histogram;
     } else {
       const std::string& name = histogram->histogram_name();
-      uint64_t name_hash = histogram->name_hash();
-      HistogramMap::iterator it = histograms_->find(name_hash);
+      HistogramMap::iterator it = histograms_->find(name);
       if (histograms_->end() == it) {
-        (*histograms_)[name_hash] = histogram;
+        // The StringKey references the name within |histogram| rather than
+        // making a copy.
+        (*histograms_)[name] = histogram;
+        ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
         // If there are callbacks for this histogram, we set the kCallbackExists
         // flag.
         auto callback_iterator = callbacks_->find(name);
@@ -87,14 +162,16 @@
 const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
     const BucketRanges* ranges) {
   DCHECK(ranges->HasValidChecksum());
-  scoped_ptr<const BucketRanges> ranges_deleter;
+  std::unique_ptr<const BucketRanges> ranges_deleter;
 
   if (lock_ == NULL) {
+    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
     return ranges;
   }
 
   base::AutoLock auto_lock(*lock_);
   if (ranges_ == NULL) {
+    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
     return ranges;
   }
 
@@ -103,6 +180,7 @@
   if (ranges_->end() == ranges_it) {
     // Add a new matching list to map.
     checksum_matching_list = new std::list<const BucketRanges*>();
+    ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
     (*ranges_)[ranges->checksum()] = checksum_matching_list;
   } else {
     checksum_matching_list = ranges_it->second;
@@ -132,6 +210,7 @@
 
   Histograms snapshot;
   GetSnapshot(query, &snapshot);
+  std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
   for (const HistogramBase* histogram : snapshot) {
     histogram->WriteHTMLGraph(output);
     output->append("<br><hr><br>");
@@ -150,6 +229,7 @@
 
   Histograms snapshot;
   GetSnapshot(query, &snapshot);
+  std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
   for (const HistogramBase* histogram : snapshot) {
     histogram->WriteAscii(output);
     output->append("\n");
@@ -194,7 +274,6 @@
     return;
 
   for (const auto& entry : *histograms_) {
-    DCHECK_EQ(entry.first, entry.second->name_hash());
     output->push_back(entry.second);
   }
 }
@@ -209,28 +288,79 @@
     return;
 
   for (const auto& entry : *ranges_) {
-    for (const auto& range_entry : *entry.second) {
+    for (auto* range_entry : *entry.second) {
       output->push_back(range_entry);
     }
   }
 }
 
 // static
-HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
+HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
+  // This must be called *before* the lock is acquired below because it will
+  // call back into this object to register histograms. Those called methods
+  // will acquire the lock at that time.
+  ImportGlobalPersistentHistograms();
+
   if (lock_ == NULL)
     return NULL;
   base::AutoLock auto_lock(*lock_);
   if (histograms_ == NULL)
     return NULL;
 
-  HistogramMap::iterator it = histograms_->find(HashMetricName(name));
+  HistogramMap::iterator it = histograms_->find(name);
   if (histograms_->end() == it)
     return NULL;
-  DCHECK_EQ(name, it->second->histogram_name()) << "hash collision";
   return it->second;
 }
 
 // static
+StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
+    bool include_persistent) {
+  DCHECK(histograms_);
+  ImportGlobalPersistentHistograms();
+
+  HistogramMap::iterator iter_begin;
+  {
+    base::AutoLock auto_lock(*lock_);
+    iter_begin = histograms_->begin();
+  }
+  return HistogramIterator(iter_begin, include_persistent);
+}
+
+// static
+StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
+  HistogramMap::iterator iter_end;
+  {
+    base::AutoLock auto_lock(*lock_);
+    iter_end = histograms_->end();
+  }
+  return HistogramIterator(iter_end, true);
+}
+
+// static
+void StatisticsRecorder::InitLogOnShutdown() {
+  if (lock_ == nullptr)
+    return;
+  base::AutoLock auto_lock(*lock_);
+  g_statistics_recorder_.Get().InitLogOnShutdownWithoutLock();
+}
+
+// static
+void StatisticsRecorder::GetSnapshot(const std::string& query,
+                                     Histograms* snapshot) {
+  if (lock_ == NULL)
+    return;
+  base::AutoLock auto_lock(*lock_);
+  if (histograms_ == NULL)
+    return;
+
+  for (const auto& entry : *histograms_) {
+    if (entry.second->histogram_name().find(query) != std::string::npos)
+      snapshot->push_back(entry.second);
+  }
+}
+
+// static
 bool StatisticsRecorder::SetCallback(
     const std::string& name,
     const StatisticsRecorder::OnSampleCallback& cb) {
@@ -245,11 +375,9 @@
     return false;
   callbacks_->insert(std::make_pair(name, cb));
 
-  HistogramMap::iterator it = histograms_->find(HashMetricName(name));
-  if (it != histograms_->end()) {
-    DCHECK_EQ(name, it->second->histogram_name()) << "hash collision";
+  auto it = histograms_->find(name);
+  if (it != histograms_->end())
     it->second->SetFlags(HistogramBase::kCallbackExists);
-  }
 
   return true;
 }
@@ -265,11 +393,9 @@
   callbacks_->erase(name);
 
   // We also clear the flag from the histogram (if it exists).
-  HistogramMap::iterator it = histograms_->find(HashMetricName(name));
-  if (it != histograms_->end()) {
-    DCHECK_EQ(name, it->second->histogram_name()) << "hash collision";
+  auto it = histograms_->find(name);
+  if (it != histograms_->end())
     it->second->ClearFlags(HistogramBase::kCallbackExists);
-  }
 }
 
 // static
@@ -286,26 +412,64 @@
                                                 : OnSampleCallback();
 }
 
-// private static
-void StatisticsRecorder::GetSnapshot(const std::string& query,
-                                     Histograms* snapshot) {
-  if (lock_ == NULL)
-    return;
+// static
+size_t StatisticsRecorder::GetHistogramCount() {
+  if (!lock_)
+    return 0;
+
   base::AutoLock auto_lock(*lock_);
-  if (histograms_ == NULL)
+  if (!histograms_)
+    return 0;
+  return histograms_->size();
+}
+
+// static
+void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
+  if (histograms_)
+    histograms_->erase(name);
+}
+
+// static
+std::unique_ptr<StatisticsRecorder>
+StatisticsRecorder::CreateTemporaryForTesting() {
+  return WrapUnique(new StatisticsRecorder());
+}
+
+// static
+void StatisticsRecorder::UninitializeForTesting() {
+  // Stop now if it's never been initialized.
+  if (lock_ == NULL || histograms_ == NULL)
     return;
 
-  for (const auto& entry : *histograms_) {
-    if (entry.second->histogram_name().find(query) != std::string::npos)
-      snapshot->push_back(entry.second);
-  }
+  // Get the global instance and destruct it. It's held in static memory so
+  // can't "delete" it; call the destructor explicitly.
+  DCHECK(g_statistics_recorder_.private_instance_);
+  g_statistics_recorder_.Get().~StatisticsRecorder();
+
+  // Now the ugly part. There's no official way to release a LazyInstance once
+  // created so it's necessary to clear out an internal variable which
+  // shouldn't be publicly visible but is for initialization reasons.
+  g_statistics_recorder_.private_instance_ = 0;
+}
+
+// static
+void StatisticsRecorder::ImportGlobalPersistentHistograms() {
+  if (lock_ == NULL)
+    return;
+
+  // Import histograms from known persistent storage. Histograms could have
+  // been added by other processes and they must be fetched and recognized
+  // locally. If the persistent memory segment is not shared between processes,
+  // this call does nothing.
+  GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+  if (allocator)
+    allocator->ImportHistogramsToStatisticsRecorder();
 }
 
 // This singleton instance should be started during the single threaded portion
 // of main(), and hence it is not thread safe.  It initializes globals to
 // provide support for all future calls.
 StatisticsRecorder::StatisticsRecorder() {
-  DCHECK(!histograms_);
   if (lock_ == NULL) {
     // This will leak on purpose. It's the only way to make sure we won't race
     // against the static uninitialization of the module while one of our
@@ -315,29 +479,36 @@
     // during static initialization and released only on  process termination.
     lock_ = new base::Lock;
   }
+
   base::AutoLock auto_lock(*lock_);
+
+  existing_histograms_.reset(histograms_);
+  existing_callbacks_.reset(callbacks_);
+  existing_ranges_.reset(ranges_);
+
   histograms_ = new HistogramMap;
   callbacks_ = new CallbackMap;
   ranges_ = new RangesMap;
 
-  if (VLOG_IS_ON(1))
+  InitLogOnShutdownWithoutLock();
+}
+
+void StatisticsRecorder::InitLogOnShutdownWithoutLock() {
+  if (!vlog_initialized_ && VLOG_IS_ON(1)) {
+    vlog_initialized_ = true;
     AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
+  }
 }
 
 // static
-void StatisticsRecorder::DumpHistogramsToVlog(void* /* instance */) {
-  std::string output;
-  StatisticsRecorder::WriteGraph(std::string(), &output);
-  VLOG(1) << output;
-}
+void StatisticsRecorder::Reset() {
+  // If there's no lock then there is nothing to reset.
+  if (!lock_)
+    return;
 
-StatisticsRecorder::~StatisticsRecorder() {
-  DCHECK(histograms_ && ranges_ && lock_);
-
-  // Clean up.
-  scoped_ptr<HistogramMap> histograms_deleter;
-  scoped_ptr<CallbackMap> callbacks_deleter;
-  scoped_ptr<RangesMap> ranges_deleter;
+  std::unique_ptr<HistogramMap> histograms_deleter;
+  std::unique_ptr<CallbackMap> callbacks_deleter;
+  std::unique_ptr<RangesMap> ranges_deleter;
   // We don't delete lock_ on purpose to avoid having to properly protect
   // against it going away after we checked for NULL in the static methods.
   {
@@ -352,6 +523,13 @@
   // We are going to leak the histograms and the ranges.
 }
 
+// static
+void StatisticsRecorder::DumpHistogramsToVlog(void* /*instance*/) {
+  std::string output;
+  StatisticsRecorder::WriteGraph(std::string(), &output);
+  VLOG(1) << output;
+}
+
 
 // static
 StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
index b1d182e..c3c6ace 100644
--- a/base/metrics/statistics_recorder.h
+++ b/base/metrics/statistics_recorder.h
@@ -14,6 +14,7 @@
 
 #include <list>
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -23,6 +24,9 @@
 #include "base/lazy_instance.h"
 #include "base/macros.h"
 #include "base/metrics/histogram_base.h"
+#include "base/strings/string_piece.h"
+
+class SubprocessMetricsProviderTest;
 
 namespace base {
 
@@ -31,8 +35,68 @@
 
 class BASE_EXPORT StatisticsRecorder {
  public:
+  // A class used as a key for the histogram map below. It always references
+  // a string owned outside of this class, likely in the value of the map.
+  class StringKey : public StringPiece {
+   public:
+    // Constructs the StringKey using various sources. The source must live
+    // at least as long as the created object.
+    StringKey(const std::string& str) : StringPiece(str) {}
+    StringKey(StringPiece str) : StringPiece(str) {}
+
+    // Though StringPiece is better passed by value than by reference, in
+    // this case it's being passed many times and likely already been stored
+    // in memory (not just registers) so the benefit of pass-by-value is
+    // negated.
+    bool operator<(const StringKey& rhs) const {
+      // Since order is unimportant in the map and string comparisons can be
+      // slow, use the length as the primary sort value.
+      if (length() < rhs.length())
+        return true;
+      if (length() > rhs.length())
+        return false;
+
+      // Fall back to an actual string comparison. The lengths are the same
+      // so a simple memory-compare is sufficient. This is slightly more
+      // efficient than calling operator<() for StringPiece which would
+      // again have to check lengths before calling wordmemcmp().
+      return wordmemcmp(data(), rhs.data(), length()) < 0;
+    }
+  };
+
+  typedef std::map<StringKey, HistogramBase*> HistogramMap;
   typedef std::vector<HistogramBase*> Histograms;
 
+  // A class for iterating over the histograms held within this global resource.
+  class BASE_EXPORT HistogramIterator {
+   public:
+    HistogramIterator(const HistogramMap::iterator& iter,
+                      bool include_persistent);
+    HistogramIterator(const HistogramIterator& rhs);  // Must be copyable.
+    ~HistogramIterator();
+
+    HistogramIterator& operator++();
+    HistogramIterator operator++(int) {
+      HistogramIterator tmp(*this);
+      operator++();
+      return tmp;
+    }
+
+    bool operator==(const HistogramIterator& rhs) const {
+      return iter_ == rhs.iter_;
+    }
+    bool operator!=(const HistogramIterator& rhs) const {
+      return iter_ != rhs.iter_;
+    }
+    HistogramBase* operator*() { return iter_->second; }
+
+   private:
+    HistogramMap::iterator iter_;
+    const bool include_persistent_;
+  };
+
+  ~StatisticsRecorder();
+
   // Initializes the StatisticsRecorder system. Safe to call multiple times.
   static void Initialize();
 
@@ -70,7 +134,11 @@
 
   // Find a histogram by name. It matches the exact name. This method is thread
   // safe.  It returns NULL if a matching histogram is not found.
-  static HistogramBase* FindHistogram(const std::string& name);
+  static HistogramBase* FindHistogram(base::StringPiece name);
+
+  // Support for iterating over known histograms.
+  static HistogramIterator begin(bool include_persistent);
+  static HistogramIterator end();
 
   // GetSnapshot copies some of the pointers to registered histograms into the
   // caller supplied vector (Histograms). Only histograms which have |query| as
@@ -96,11 +164,33 @@
   // histogram. This method is thread safe.
   static OnSampleCallback FindCallback(const std::string& histogram_name);
 
- private:
-  // We keep all registered histograms in a map, indexed by the hash of the
-  // name of the histogram.
-  typedef std::map<uint64_t, HistogramBase*> HistogramMap;
+  // Returns the number of known histograms.
+  static size_t GetHistogramCount();
 
+  // Initializes logging histograms with --v=1. Safe to call multiple times.
+  // Is called from ctor but for browser it seems that it is more useful to
+  // start logging after statistics recorder, so we need to init log-on-shutdown
+  // later.
+  static void InitLogOnShutdown();
+
+  // Removes a histogram from the internal set of known ones. This can be
+  // necessary during testing persistent histograms where the underlying
+  // memory is being released.
+  static void ForgetHistogramForTesting(base::StringPiece name);
+
+  // Creates a local StatisticsRecorder object for testing purposes. All new
+  // histograms will be registered in it until it is destructed or pushed
+  // aside for the lifetime of yet another SR object. The destruction of the
+  // returned object will re-activate the previous one. Always release SR
+  // objects in the opposite order to which they're created.
+  static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
+      WARN_UNUSED_RESULT;
+
+  // Resets any global instance of the statistics-recorder that was created
+  // by a call to Initialize().
+  static void UninitializeForTesting();
+
+ private:
   // We keep a map of callbacks to histograms, so that as histograms are
   // created, we can set the callback properly.
   typedef std::map<std::string, OnSampleCallback> CallbackMap;
@@ -111,21 +201,31 @@
   typedef std::map<uint32_t, std::list<const BucketRanges*>*> RangesMap;
 
   friend struct DefaultLazyInstanceTraits<StatisticsRecorder>;
-  friend class HistogramBaseTest;
-  friend class HistogramSnapshotManagerTest;
-  friend class HistogramTest;
-  friend class JsonPrefStoreTest;
-  friend class SparseHistogramTest;
   friend class StatisticsRecorderTest;
-  FRIEND_TEST_ALL_PREFIXES(HistogramDeltaSerializationTest,
-                           DeserializeHistogramAndAddSamples);
+
+  // Imports histograms from global persistent memory. The global lock must
+  // not be held during this call.
+  static void ImportGlobalPersistentHistograms();
 
   // The constructor just initializes static members. Usually client code should
   // use Initialize to do this. But in test code, you can friend this class and
-  // call destructor/constructor to get a clean StatisticsRecorder.
+  // call the constructor to get a clean StatisticsRecorder.
   StatisticsRecorder();
-  ~StatisticsRecorder();
 
+  // Initialize implementation but without lock. Caller should guard
+  // StatisticsRecorder by itself if needed (it isn't in unit tests).
+  void InitLogOnShutdownWithoutLock();
+
+  // These are copies of everything that existed when the (test) Statistics-
+  // Recorder was created. The global ones have to be moved aside to create a
+  // clean environment.
+  std::unique_ptr<HistogramMap> existing_histograms_;
+  std::unique_ptr<CallbackMap> existing_callbacks_;
+  std::unique_ptr<RangesMap> existing_ranges_;
+
+  bool vlog_initialized_ = false;
+
+  static void Reset();
   static void DumpHistogramsToVlog(void* instance);
 
   static HistogramMap* histograms_;
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index af5c1e7..65e2c98 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -2,38 +2,79 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/metrics/statistics_recorder.h"
+
 #include <stddef.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/bind.h"
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/logging.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/sparse_histogram.h"
-#include "base/metrics/statistics_recorder.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
+namespace {
+
+// Class to make sure any manipulations we do to the min log level are
+// contained (i.e., do not affect other unit tests).
+class LogStateSaver {
+ public:
+  LogStateSaver() : old_min_log_level_(logging::GetMinLogLevel()) {}
+
+  ~LogStateSaver() {
+    logging::SetMinLogLevel(old_min_log_level_);
+    logging::SetLogAssertHandler(nullptr);
+  }
+
+ private:
+  int old_min_log_level_;
+
+  DISALLOW_COPY_AND_ASSIGN(LogStateSaver);
+};
+
+}  // namespace
+
 namespace base {
 
-class StatisticsRecorderTest : public testing::Test {
+class StatisticsRecorderTest : public testing::TestWithParam<bool> {
  protected:
-  void SetUp() override {
+  const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
+
+  StatisticsRecorderTest() : use_persistent_histogram_allocator_(GetParam()) {
+    // Get this first so it never gets created in persistent storage and will
+    // not appear in the StatisticsRecorder after it is re-initialized.
+    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
     InitializeStatisticsRecorder();
+
+    // Use persistent memory for histograms if so indicated by test parameter.
+    if (use_persistent_histogram_allocator_) {
+      GlobalHistogramAllocator::CreateWithLocalMemory(
+          kAllocatorMemorySize, 0, "StatisticsRecorderTest");
+    }
   }
 
-  void TearDown() override { UninitializeStatisticsRecorder(); }
+  ~StatisticsRecorderTest() override {
+    GlobalHistogramAllocator::ReleaseForTesting();
+    UninitializeStatisticsRecorder();
+  }
 
   void InitializeStatisticsRecorder() {
-    statistics_recorder_ = new StatisticsRecorder();
+    DCHECK(!statistics_recorder_);
+    StatisticsRecorder::UninitializeForTesting();
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
   }
 
   void UninitializeStatisticsRecorder() {
-    delete statistics_recorder_;
-    statistics_recorder_ = NULL;
+    statistics_recorder_.reset();
+    StatisticsRecorder::UninitializeForTesting();
   }
 
   Histogram* CreateHistogram(const std::string& name,
@@ -51,10 +92,39 @@
     delete histogram;
   }
 
-  StatisticsRecorder* statistics_recorder_;
+  int CountIterableHistograms(StatisticsRecorder::HistogramIterator* iter) {
+    int count = 0;
+    for (; *iter != StatisticsRecorder::end(); ++*iter) {
+      ++count;
+    }
+    return count;
+  }
+
+  void InitLogOnShutdown() {
+    DCHECK(statistics_recorder_);
+    statistics_recorder_->InitLogOnShutdownWithoutLock();
+  }
+
+  bool VLogInitialized() {
+    DCHECK(statistics_recorder_);
+    return statistics_recorder_->vlog_initialized_;
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<GlobalHistogramAllocator> old_global_allocator_;
+
+ private:
+  LogStateSaver log_state_saver_;
+
+  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorderTest);
 };
 
-TEST_F(StatisticsRecorderTest, NotInitialized) {
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(Allocator, StatisticsRecorderTest, testing::Bool());
+
+TEST_P(StatisticsRecorderTest, NotInitialized) {
   UninitializeStatisticsRecorder();
 
   ASSERT_FALSE(StatisticsRecorder::IsActive());
@@ -82,7 +152,7 @@
   EXPECT_EQ(0u, registered_ranges.size());
 }
 
-TEST_F(StatisticsRecorderTest, RegisterBucketRanges) {
+TEST_P(StatisticsRecorderTest, RegisterBucketRanges) {
   std::vector<const BucketRanges*> registered_ranges;
 
   BucketRanges* ranges1 = new BucketRanges(3);
@@ -120,7 +190,7 @@
   ASSERT_EQ(2u, registered_ranges.size());
 }
 
-TEST_F(StatisticsRecorderTest, RegisterHistogram) {
+TEST_P(StatisticsRecorderTest, RegisterHistogram) {
   // Create a Histogram that was not registered.
   Histogram* histogram = CreateHistogram("TestHistogram", 1, 1000, 10);
 
@@ -142,7 +212,7 @@
   EXPECT_EQ(1u, registered_histograms.size());
 }
 
-TEST_F(StatisticsRecorderTest, FindHistogram) {
+TEST_P(StatisticsRecorderTest, FindHistogram) {
   HistogramBase* histogram1 = Histogram::FactoryGet(
       "TestHistogram1", 1, 1000, 10, HistogramBase::kNoFlags);
   HistogramBase* histogram2 = Histogram::FactoryGet(
@@ -150,10 +220,33 @@
 
   EXPECT_EQ(histogram1, StatisticsRecorder::FindHistogram("TestHistogram1"));
   EXPECT_EQ(histogram2, StatisticsRecorder::FindHistogram("TestHistogram2"));
-  EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram") == NULL);
+  EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram"));
+
+  // Create a new global allocator using the same memory as the old one. Any
+  // old one is kept around so the memory doesn't get released.
+  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  if (use_persistent_histogram_allocator_) {
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        const_cast<void*>(old_global_allocator_->data()),
+        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
+        old_global_allocator_->Name());
+  }
+
+  // Reset statistics-recorder to validate operation from a clean start.
+  UninitializeStatisticsRecorder();
+  InitializeStatisticsRecorder();
+
+  if (use_persistent_histogram_allocator_) {
+    EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram1"));
+    EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram2"));
+  } else {
+    EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram1"));
+    EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram2"));
+  }
+  EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram"));
 }
 
-TEST_F(StatisticsRecorderTest, GetSnapshot) {
+TEST_P(StatisticsRecorderTest, GetSnapshot) {
   Histogram::FactoryGet("TestHistogram1", 1, 1000, 10, Histogram::kNoFlags);
   Histogram::FactoryGet("TestHistogram2", 1, 1000, 10, Histogram::kNoFlags);
   Histogram::FactoryGet("TestHistogram3", 1, 1000, 10, Histogram::kNoFlags);
@@ -171,7 +264,7 @@
   EXPECT_EQ(0u, snapshot.size());
 }
 
-TEST_F(StatisticsRecorderTest, RegisterHistogramWithFactoryGet) {
+TEST_P(StatisticsRecorderTest, RegisterHistogramWithFactoryGet) {
   StatisticsRecorder::Histograms registered_histograms;
 
   StatisticsRecorder::GetHistograms(&registered_histograms);
@@ -217,7 +310,14 @@
   EXPECT_EQ(4u, registered_histograms.size());
 }
 
-TEST_F(StatisticsRecorderTest, RegisterHistogramWithMacros) {
+TEST_P(StatisticsRecorderTest, RegisterHistogramWithMacros) {
+  // Macros cache pointers and so tests that use them can only be run once.
+  // Stop immediately if this test has run previously.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
   StatisticsRecorder::Histograms registered_histograms;
 
   HistogramBase* histogram = Histogram::FactoryGet(
@@ -238,7 +338,7 @@
   EXPECT_EQ(3u, registered_histograms.size());
 }
 
-TEST_F(StatisticsRecorderTest, BucketRangesSharing) {
+TEST_P(StatisticsRecorderTest, BucketRangesSharing) {
   std::vector<const BucketRanges*> ranges;
   StatisticsRecorder::GetBucketRanges(&ranges);
   EXPECT_EQ(0u, ranges.size());
@@ -256,16 +356,20 @@
   EXPECT_EQ(2u, ranges.size());
 }
 
-TEST_F(StatisticsRecorderTest, ToJSON) {
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram1", 30);
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram1", 40);
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram2", 30);
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram2", 40);
+TEST_P(StatisticsRecorderTest, ToJSON) {
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(30);
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(40);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(30);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(40);
 
   std::string json(StatisticsRecorder::ToJSON(std::string()));
 
   // Check for valid JSON.
-  scoped_ptr<Value> root = JSONReader::Read(json);
+  std::unique_ptr<Value> root = JSONReader::Read(json);
   ASSERT_TRUE(root.get());
 
   DictionaryValue* root_dict = NULL;
@@ -315,6 +419,39 @@
   EXPECT_TRUE(json.empty());
 }
 
+TEST_P(StatisticsRecorderTest, IterationTest) {
+  Histogram::FactoryGet("IterationTest1", 1, 64, 16, HistogramBase::kNoFlags);
+  Histogram::FactoryGet("IterationTest2", 1, 64, 16, HistogramBase::kNoFlags);
+
+  StatisticsRecorder::HistogramIterator i1 = StatisticsRecorder::begin(true);
+  EXPECT_EQ(2, CountIterableHistograms(&i1));
+
+  StatisticsRecorder::HistogramIterator i2 = StatisticsRecorder::begin(false);
+  EXPECT_EQ(use_persistent_histogram_allocator_ ? 0 : 2,
+            CountIterableHistograms(&i2));
+
+  // Create a new global allocator using the same memory as the old one. Any
+  // old one is kept around so the memory doesn't get released.
+  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  if (use_persistent_histogram_allocator_) {
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        const_cast<void*>(old_global_allocator_->data()),
+        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
+        old_global_allocator_->Name());
+  }
+
+  // Reset statistics-recorder to validate operation from a clean start.
+  UninitializeStatisticsRecorder();
+  InitializeStatisticsRecorder();
+
+  StatisticsRecorder::HistogramIterator i3 = StatisticsRecorder::begin(true);
+  EXPECT_EQ(use_persistent_histogram_allocator_ ? 2 : 0,
+            CountIterableHistograms(&i3));
+
+  StatisticsRecorder::HistogramIterator i4 = StatisticsRecorder::begin(false);
+  EXPECT_EQ(0, CountIterableHistograms(&i4));
+}
+
 namespace {
 
 // CallbackCheckWrapper is simply a convenient way to check and store that
@@ -334,7 +471,7 @@
 }  // namespace
 
 // Check that you can't overwrite the callback with another.
-TEST_F(StatisticsRecorderTest, SetCallbackFailsWithoutHistogramTest) {
+TEST_P(StatisticsRecorderTest, SetCallbackFailsWithoutHistogramTest) {
   CallbackCheckWrapper callback_wrapper;
 
   bool result = base::StatisticsRecorder::SetCallback(
@@ -349,7 +486,7 @@
 }
 
 // Check that you can't overwrite the callback with another.
-TEST_F(StatisticsRecorderTest, SetCallbackFailsWithHistogramTest) {
+TEST_P(StatisticsRecorderTest, SetCallbackFailsWithHistogramTest) {
   HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
                                                    HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram);
@@ -376,7 +513,7 @@
 }
 
 // Check that you can't overwrite the callback with another.
-TEST_F(StatisticsRecorderTest, ClearCallbackSuceedsWithHistogramTest) {
+TEST_P(StatisticsRecorderTest, ClearCallbackSuceedsWithHistogramTest) {
   HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
                                                    HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram);
@@ -399,7 +536,7 @@
 }
 
 // Check that callback is used.
-TEST_F(StatisticsRecorderTest, CallbackUsedTest) {
+TEST_P(StatisticsRecorderTest, CallbackUsedTest) {
   {
     HistogramBase* histogram = Histogram::FactoryGet(
         "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
@@ -473,7 +610,7 @@
 }
 
 // Check that setting a callback before the histogram exists works.
-TEST_F(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
+TEST_P(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
   CallbackCheckWrapper callback_wrapper;
 
   base::StatisticsRecorder::SetCallback(
@@ -489,4 +626,34 @@
   EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
 }
 
+TEST_P(StatisticsRecorderTest, LogOnShutdownNotInitialized) {
+  UninitializeStatisticsRecorder();
+  logging::SetMinLogLevel(logging::LOG_WARNING);
+  InitializeStatisticsRecorder();
+  EXPECT_FALSE(VLOG_IS_ON(1));
+  EXPECT_FALSE(VLogInitialized());
+  InitLogOnShutdown();
+  EXPECT_FALSE(VLogInitialized());
+}
+
+TEST_P(StatisticsRecorderTest, LogOnShutdownInitializedExplicitly) {
+  UninitializeStatisticsRecorder();
+  logging::SetMinLogLevel(logging::LOG_WARNING);
+  InitializeStatisticsRecorder();
+  EXPECT_FALSE(VLOG_IS_ON(1));
+  EXPECT_FALSE(VLogInitialized());
+  logging::SetMinLogLevel(logging::LOG_VERBOSE);
+  EXPECT_TRUE(VLOG_IS_ON(1));
+  InitLogOnShutdown();
+  EXPECT_TRUE(VLogInitialized());
+}
+
+TEST_P(StatisticsRecorderTest, LogOnShutdownInitialized) {
+  UninitializeStatisticsRecorder();
+  logging::SetMinLogLevel(logging::LOG_VERBOSE);
+  InitializeStatisticsRecorder();
+  EXPECT_TRUE(VLOG_IS_ON(1));
+  EXPECT_TRUE(VLogInitialized());
+}
+
 }  // namespace base
diff --git a/base/metrics/user_metrics.cc b/base/metrics/user_metrics.cc
new file mode 100644
index 0000000..169a063
--- /dev/null
+++ b/base/metrics/user_metrics.cc
@@ -0,0 +1,74 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/user_metrics.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace {
+
+LazyInstance<std::vector<ActionCallback>> g_callbacks =
+    LAZY_INSTANCE_INITIALIZER;
+LazyInstance<scoped_refptr<SingleThreadTaskRunner>> g_task_runner =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+void RecordAction(const UserMetricsAction& action) {
+  RecordComputedAction(action.str_);
+}
+
+void RecordComputedAction(const std::string& action) {
+  if (!g_task_runner.Get()) {
+    DCHECK(g_callbacks.Get().empty());
+    return;
+  }
+
+  if (!g_task_runner.Get()->BelongsToCurrentThread()) {
+    g_task_runner.Get()->PostTask(FROM_HERE,
+                                  Bind(&RecordComputedAction, action));
+    return;
+  }
+
+  for (const ActionCallback& callback : g_callbacks.Get()) {
+    callback.Run(action);
+  }
+}
+
+void AddActionCallback(const ActionCallback& callback) {
+  // Only allow adding a callback if the task runner is set.
+  DCHECK(g_task_runner.Get());
+  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
+  g_callbacks.Get().push_back(callback);
+}
+
+void RemoveActionCallback(const ActionCallback& callback) {
+  DCHECK(g_task_runner.Get());
+  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
+  std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
+  for (size_t i = 0; i < callbacks->size(); ++i) {
+    if ((*callbacks)[i].Equals(callback)) {
+      callbacks->erase(callbacks->begin() + i);
+      return;
+    }
+  }
+}
+
+void SetRecordActionTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  DCHECK(task_runner->BelongsToCurrentThread());
+  DCHECK(!g_task_runner.Get() || g_task_runner.Get()->BelongsToCurrentThread());
+  g_task_runner.Get() = task_runner;
+}
+
+}  // namespace base
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
new file mode 100644
index 0000000..93701e8
--- /dev/null
+++ b/base/metrics/user_metrics.h
@@ -0,0 +1,70 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_USER_METRICS_H_
+#define BASE_METRICS_USER_METRICS_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/metrics/user_metrics_action.h"
+#include "base/single_thread_task_runner.h"
+
+namespace base {
+
+// This module provides some helper functions for logging actions tracked by
+// the user metrics system.
+
+// Record that the user performed an action.
+// This function must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+//
+// "Action" here means a user-generated event:
+//   good: "Reload", "CloseTab", and "IMEInvoked"
+//   not good: "SSLDialogShown", "PageLoaded", "DiskFull"
+// We use this to gather anonymized information about how users are
+// interacting with the browser.
+// WARNING: In calls to this function, UserMetricsAction should be followed by a
+// string literal parameter and not a variable e.g.
+//   RecordAction(UserMetricsAction("my action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
+//
+// Once a new recorded action is added, run
+//   tools/metrics/actions/extract_actions.py
+// to add the metric to actions.xml, then update the <owner>s and <description>
+// sections. Make sure to include the actions.xml file when you upload your code
+// for review!
+//
+// For more complicated situations (like when there are many different
+// possible actions), see RecordComputedAction().
+BASE_EXPORT void RecordAction(const UserMetricsAction& action);
+
+// This function has identical input and behavior to RecordAction(), but is
+// not automatically found by the action-processing scripts.  It can be used
+// when it's a pain to enumerate all possible actions, but if you use this
+// you need to also update the rules for extracting known actions in
+// tools/metrics/actions/extract_actions.py.
+// This function must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+BASE_EXPORT void RecordComputedAction(const std::string& action);
+
+// Called with the action string.
+typedef Callback<void(const std::string&)> ActionCallback;
+
+// Add/remove action callbacks (see above).
+// These functions must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
+BASE_EXPORT void AddActionCallback(const ActionCallback& callback);
+BASE_EXPORT void RemoveActionCallback(const ActionCallback& callback);
+
+// Set the task runner on which to record actions.
+BASE_EXPORT void SetRecordActionTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+}  // namespace base
+
+#endif  // BASE_METRICS_USER_METRICS_H_
diff --git a/base/metrics/user_metrics_action.h b/base/metrics/user_metrics_action.h
index 8c195b3..3eca3dd 100644
--- a/base/metrics/user_metrics_action.h
+++ b/base/metrics/user_metrics_action.h
@@ -10,13 +10,12 @@
 // UserMetricsAction exists purely to standardize on the parameters passed to
 // UserMetrics. That way, our toolset can scan the source code reliable for
 // constructors and extract the associated string constants.
-// WARNING: When using UserMetricsAction, UserMetricsAction and a string literal
-// parameter must be on the same line, e.g.
-//   RecordAction(UserMetricsAction("my extremely long action name"));
-// or
-//   RenderThread::Get()->RecordAction(
-//       UserMetricsAction("my extremely long action name"));
-// because otherwise our processing scripts won't pick up on new actions.
+// WARNING: When using UserMetricsAction you should use a string literal
+// parameter e.g.
+//   RecordAction(UserMetricsAction("my action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
 // Please see tools/metrics/actions/extract_actions.py for details.
 struct UserMetricsAction {
   const char* str_;
diff --git a/base/move.h b/base/move.h
deleted file mode 100644
index 24bf9d7..0000000
--- a/base/move.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MOVE_H_
-#define BASE_MOVE_H_
-
-#include <utility>
-
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "build/build_config.h"
-
-// TODO(crbug.com/566182): DEPRECATED!
-// Use DISALLOW_COPY_AND_ASSIGN instead, or if your type will be used in
-// Callbacks, use DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND instead.
-#define MOVE_ONLY_TYPE_FOR_CPP_03(type) \
-  DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type)
-
-// A macro to disallow the copy constructor and copy assignment functions.
-// This should be used in the private: declarations for a class.
-//
-// Use this macro instead of DISALLOW_COPY_AND_ASSIGN if you want to pass
-// ownership of the type through a base::Callback without heap-allocating it
-// into a scoped_ptr.  The class must define a move constructor and move
-// assignment operator to make this work.
-//
-// This version of the macro adds a Pass() function and a cryptic
-// MoveOnlyTypeForCPP03 typedef for the base::Callback implementation to use.
-// See IsMoveOnlyType template and its usage in base/callback_internal.h
-// for more details.
-// TODO(crbug.com/566182): Remove this macro and use DISALLOW_COPY_AND_ASSIGN
-// everywhere instead.
-#if defined(OS_ANDROID) || defined(OS_LINUX)
-#define DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type)       \
- private:                                                       \
-  type(const type&) = delete;                                   \
-  void operator=(const type&) = delete;                         \
-                                                                \
- public:                                                        \
-  typedef void MoveOnlyTypeForCPP03;                            \
-                                                                \
- private:
-#else
-#define DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type)       \
- private:                                                       \
-  type(const type&) = delete;                                   \
-  void operator=(const type&) = delete;                         \
-                                                                \
- public:                                                        \
-  type&& Pass() WARN_UNUSED_RESULT { return std::move(*this); } \
-  typedef void MoveOnlyTypeForCPP03;                            \
-                                                                \
- private:
-#endif
-
-#endif  // BASE_MOVE_H_
diff --git a/base/native_library.h b/base/native_library.h
index 1e764da..b4f3a3c 100644
--- a/base/native_library.h
+++ b/base/native_library.h
@@ -11,8 +11,7 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
 #include "build/build_config.h"
 
 #if defined(OS_WIN)
@@ -26,7 +25,7 @@
 class FilePath;
 
 #if defined(OS_WIN)
-typedef HMODULE NativeLibrary;
+using NativeLibrary = HMODULE;
 #elif defined(OS_MACOSX)
 enum NativeLibraryType {
   BUNDLE,
@@ -46,9 +45,9 @@
     void* dylib;
   };
 };
-typedef NativeLibraryStruct* NativeLibrary;
+using NativeLibrary = NativeLibraryStruct*;
 #elif defined(OS_POSIX)
-typedef void* NativeLibrary;
+using NativeLibrary = void*;
 #endif  // OS_*
 
 struct BASE_EXPORT NativeLibraryLoadError {
@@ -87,13 +86,14 @@
 
 // Gets a function pointer from a native library.
 BASE_EXPORT void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
-                                                      const char* name);
+                                                      StringPiece name);
 
 // Returns the full platform specific name for a native library.
+// |name| must be ASCII.
 // For example:
 // "mylib" returns "mylib.dll" on Windows, "libmylib.so" on Linux,
-// "mylib.dylib" on Mac.
-BASE_EXPORT string16 GetNativeLibraryName(const string16& name);
+// "libmylib.dylib" on Mac.
+BASE_EXPORT std::string GetNativeLibraryName(StringPiece name);
 
 }  // namespace base
 
diff --git a/base/native_library_posix.cc b/base/native_library_posix.cc
index 3179a93..2dc434b 100644
--- a/base/native_library_posix.cc
+++ b/base/native_library_posix.cc
@@ -8,6 +8,7 @@
 
 #include "base/files/file_path.h"
 #include "base/logging.h"
+#include "base/strings/string_util.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/threading/thread_restrictions.h"
 
@@ -21,7 +22,7 @@
 NativeLibrary LoadNativeLibrary(const FilePath& library_path,
                                 NativeLibraryLoadError* error) {
   // dlopen() opens the file off disk.
-  base::ThreadRestrictions::AssertIOAllowed();
+  ThreadRestrictions::AssertIOAllowed();
 
   // We deliberately do not use RTLD_DEEPBIND.  For the history why, please
   // refer to the bug tracker.  Some useful bug reports to read include:
@@ -45,13 +46,14 @@
 
 // static
 void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
-                                          const char* name) {
-  return dlsym(library, name);
+                                          StringPiece name) {
+  return dlsym(library, name.data());
 }
 
 // static
-string16 GetNativeLibraryName(const string16& name) {
-  return ASCIIToUTF16("lib") + name + ASCIIToUTF16(".so");
+std::string GetNativeLibraryName(StringPiece name) {
+  DCHECK(IsStringASCII(name));
+  return "lib" + name.as_string() + ".so";
 }
 
 }  // namespace base
diff --git a/base/numerics/safe_conversions.h b/base/numerics/safe_conversions.h
index baac188..6b558af 100644
--- a/base/numerics/safe_conversions.h
+++ b/base/numerics/safe_conversions.h
@@ -18,7 +18,7 @@
 // Convenience function that returns true if the supplied value is in range
 // for the destination type.
 template <typename Dst, typename Src>
-inline bool IsValueInRangeForNumericType(Src value) {
+constexpr bool IsValueInRangeForNumericType(Src value) {
   return internal::DstRangeRelationToSrcRange<Dst>(value) ==
          internal::RANGE_VALID;
 }
@@ -26,7 +26,7 @@
 // Convenience function for determining if a numeric value is negative without
 // throwing compiler warnings on: unsigned(value) < 0.
 template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+constexpr typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
 IsValueNegative(T value) {
   static_assert(std::numeric_limits<T>::is_specialized,
                 "Argument must be numeric.");
@@ -34,8 +34,8 @@
 }
 
 template <typename T>
-typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
-    IsValueNegative(T) {
+constexpr typename std::enable_if<!std::numeric_limits<T>::is_signed,
+                                  bool>::type IsValueNegative(T) {
   static_assert(std::numeric_limits<T>::is_specialized,
                 "Argument must be numeric.");
   return false;
@@ -62,11 +62,29 @@
 // HandleNaN will return 0 in this case.
 struct SaturatedCastNaNBehaviorReturnZero {
   template <typename T>
-  static T HandleNaN() {
+  static constexpr T HandleNaN() {
     return T();
   }
 };
 
+namespace internal {
+// This wrapper is used for C++11 constexpr support by avoiding the declaration
+// of local variables in the saturated_cast template function.
+template <typename Dst, class NaNHandler, typename Src>
+constexpr Dst saturated_cast_impl(const Src value,
+                                  const RangeConstraint constraint) {
+  return constraint == RANGE_VALID
+             ? static_cast<Dst>(value)
+             : (constraint == RANGE_UNDERFLOW
+                    ? std::numeric_limits<Dst>::min()
+                    : (constraint == RANGE_OVERFLOW
+                           ? std::numeric_limits<Dst>::max()
+                           : (constraint == RANGE_INVALID
+                                  ? NaNHandler::template HandleNaN<Dst>()
+                                  : (NOTREACHED(), static_cast<Dst>(value)))));
+}
+}  // namespace internal
+
 // saturated_cast<> is analogous to static_cast<> for numeric types, except
 // that the specified numeric conversion will saturate rather than overflow or
 // underflow. NaN assignment to an integral will defer the behavior to a
@@ -74,35 +92,18 @@
 template <typename Dst,
           class NaNHandler = SaturatedCastNaNBehaviorReturnZero,
           typename Src>
-inline Dst saturated_cast(Src value) {
-  // Optimization for floating point values, which already saturate.
-  if (std::numeric_limits<Dst>::is_iec559)
-    return static_cast<Dst>(value);
-
-  switch (internal::DstRangeRelationToSrcRange<Dst>(value)) {
-    case internal::RANGE_VALID:
-      return static_cast<Dst>(value);
-
-    case internal::RANGE_UNDERFLOW:
-      return std::numeric_limits<Dst>::min();
-
-    case internal::RANGE_OVERFLOW:
-      return std::numeric_limits<Dst>::max();
-
-    // Should fail only on attempting to assign NaN to a saturated integer.
-    case internal::RANGE_INVALID:
-      return NaNHandler::template HandleNaN<Dst>();
-  }
-
-  NOTREACHED();
-  return static_cast<Dst>(value);
+constexpr Dst saturated_cast(Src value) {
+  return std::numeric_limits<Dst>::is_iec559
+             ? static_cast<Dst>(value)  // Floating point optimization.
+             : internal::saturated_cast_impl<Dst, NaNHandler>(
+                   value, internal::DstRangeRelationToSrcRange<Dst>(value));
 }
 
 // strict_cast<> is analogous to static_cast<> for numeric types, except that
 // it will cause a compile failure if the destination type is not large enough
 // to contain any value in the source type. It performs no runtime checking.
 template <typename Dst, typename Src>
-inline Dst strict_cast(Src value) {
+constexpr Dst strict_cast(Src value) {
   static_assert(std::numeric_limits<Src>::is_specialized,
                 "Argument must be numeric.");
   static_assert(std::numeric_limits<Dst>::is_specialized,
@@ -128,33 +129,33 @@
 // compiles cleanly with truncation warnings enabled.
 // This template should introduce no runtime overhead, but it also provides no
 // runtime checking of any of the associated mathematical operations. Use
-// CheckedNumeric for runtime range checks of tha actual value being assigned.
+// CheckedNumeric for runtime range checks of the actual value being assigned.
 template <typename T>
 class StrictNumeric {
  public:
   typedef T type;
 
-  StrictNumeric() : value_(0) {}
+  constexpr StrictNumeric() : value_(0) {}
 
   // Copy constructor.
   template <typename Src>
-  StrictNumeric(const StrictNumeric<Src>& rhs)
+  constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
       : value_(strict_cast<T>(rhs.value_)) {}
 
   // This is not an explicit constructor because we implicitly upgrade regular
   // numerics to StrictNumerics to make them easier to use.
   template <typename Src>
-  StrictNumeric(Src value)
+  constexpr StrictNumeric(Src value)
       : value_(strict_cast<T>(value)) {}
 
   // The numeric cast operator basically handles all the magic.
   template <typename Dst>
-  operator Dst() const {
+  constexpr operator Dst() const {
     return strict_cast<Dst>(value_);
   }
 
  private:
-  T value_;
+  const T value_;
 };
 
 // Explicitly make a shorter size_t typedef for convenience.
diff --git a/base/numerics/safe_conversions_impl.h b/base/numerics/safe_conversions_impl.h
index 181dd7e..0f0aebc 100644
--- a/base/numerics/safe_conversions_impl.h
+++ b/base/numerics/safe_conversions_impl.h
@@ -8,10 +8,9 @@
 #include <limits.h>
 #include <stdint.h>
 
+#include <climits>
 #include <limits>
 
-#include "base/template_util.h"
-
 namespace base {
 namespace internal {
 
@@ -20,9 +19,11 @@
 // for accurate range comparisons between floating point and integer types.
 template <typename NumericType>
 struct MaxExponent {
+  static_assert(std::is_arithmetic<NumericType>::value,
+                "Argument must be numeric.");
   static const int value = std::numeric_limits<NumericType>::is_iec559
                                ? std::numeric_limits<NumericType>::max_exponent
-                               : (sizeof(NumericType) * 8 + 1 -
+                               : (sizeof(NumericType) * CHAR_BIT + 1 -
                                   std::numeric_limits<NumericType>::is_signed);
 };
 
@@ -96,17 +97,18 @@
 };
 
 // Helper function for coercing an int back to a RangeContraint.
-inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
-  DCHECK(integer_range_constraint >= RANGE_VALID &&
-         integer_range_constraint <= RANGE_INVALID);
+constexpr RangeConstraint GetRangeConstraint(int integer_range_constraint) {
+  // TODO(jschuh): Once we get full C++14 support we want this
+  // assert(integer_range_constraint >= RANGE_VALID &&
+  //        integer_range_constraint <= RANGE_INVALID)
   return static_cast<RangeConstraint>(integer_range_constraint);
 }
 
 // This function creates a RangeConstraint from an upper and lower bound
 // check by taking advantage of the fact that only NaN can be out of range in
 // both directions at once.
-inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
-                                   bool is_in_lower_bound) {
+constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
+                                                    bool is_in_lower_bound) {
   return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
                             (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
 }
@@ -136,25 +138,24 @@
 struct NarrowingRange {
   typedef typename std::numeric_limits<Src> SrcLimits;
   typedef typename std::numeric_limits<Dst> DstLimits;
+  // The following logic avoids warnings where the max function is
+  // instantiated with invalid values for a bit shift (even though
+  // such a function can never be called).
+  static const int shift = (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+                            SrcLimits::digits < DstLimits::digits &&
+                            SrcLimits::is_iec559 &&
+                            DstLimits::is_integer)
+                               ? (DstLimits::digits - SrcLimits::digits)
+                               : 0;
 
-  static Dst max() {
-    // The following logic avoids warnings where the max function is
-    // instantiated with invalid values for a bit shift (even though
-    // such a function can never be called).
-    static const int shift =
-        (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
-         SrcLimits::digits < DstLimits::digits && SrcLimits::is_iec559 &&
-         DstLimits::is_integer)
-            ? (DstLimits::digits - SrcLimits::digits)
-            : 0;
-
+  static constexpr Dst max() {
     // We use UINTMAX_C below to avoid compiler warnings about shifting floating
     // points. Since it's a compile time calculation, it shouldn't have any
     // performance impact.
     return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1);
   }
 
-  static Dst min() {
+  static constexpr Dst min() {
     return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max()
                                                : DstLimits::min();
   }
@@ -187,7 +188,7 @@
                                       DstSign,
                                       SrcSign,
                                       NUMERIC_RANGE_CONTAINED> {
-  static RangeConstraint Check(Src /* value */) { return RANGE_VALID; }
+  static constexpr RangeConstraint Check(Src /*value*/) { return RANGE_VALID; }
 };
 
 // Signed to signed narrowing: Both the upper and lower boundaries may be
@@ -198,7 +199,7 @@
                                       INTEGER_REPRESENTATION_SIGNED,
                                       INTEGER_REPRESENTATION_SIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static RangeConstraint Check(Src value) {
+  static constexpr RangeConstraint Check(Src value) {
     return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()),
                               (value >= NarrowingRange<Dst, Src>::min()));
   }
@@ -211,7 +212,7 @@
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static RangeConstraint Check(Src value) {
+  static constexpr RangeConstraint Check(Src value) {
     return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true);
   }
 };
@@ -223,7 +224,7 @@
                                       INTEGER_REPRESENTATION_SIGNED,
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static RangeConstraint Check(Src value) {
+  static constexpr RangeConstraint Check(Src value) {
     return sizeof(Dst) > sizeof(Src)
                ? RANGE_VALID
                : GetRangeConstraint(
@@ -240,7 +241,7 @@
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       INTEGER_REPRESENTATION_SIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static RangeConstraint Check(Src value) {
+  static constexpr RangeConstraint Check(Src value) {
     return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
                ? GetRangeConstraint(true, value >= static_cast<Src>(0))
                : GetRangeConstraint(
@@ -250,7 +251,7 @@
 };
 
 template <typename Dst, typename Src>
-inline RangeConstraint DstRangeRelationToSrcRange(Src value) {
+constexpr RangeConstraint DstRangeRelationToSrcRange(Src value) {
   static_assert(std::numeric_limits<Src>::is_specialized,
                 "Argument must be numeric.");
   static_assert(std::numeric_limits<Dst>::is_specialized,
diff --git a/base/numerics/safe_math.h b/base/numerics/safe_math.h
index d169690..d0003b7 100644
--- a/base/numerics/safe_math.h
+++ b/base/numerics/safe_math.h
@@ -7,6 +7,10 @@
 
 #include <stddef.h>
 
+#include <limits>
+#include <type_traits>
+
+#include "base/logging.h"
 #include "base/numerics/safe_math_impl.h"
 
 namespace base {
@@ -44,6 +48,9 @@
 //     Do stuff...
 template <typename T>
 class CheckedNumeric {
+  static_assert(std::is_arithmetic<T>::value,
+                "CheckedNumeric<T>: T must be a numeric type.");
+
  public:
   typedef T type;
 
@@ -61,7 +68,7 @@
   // This is not an explicit constructor because we implicitly upgrade regular
   // numerics to CheckedNumerics to make them easier to use.
   template <typename Src>
-  CheckedNumeric(Src value)
+  CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
       : state_(value) {
     static_assert(std::numeric_limits<Src>::is_specialized,
                   "Argument must be numeric.");
@@ -70,7 +77,7 @@
   // This is not an explicit constructor because we want a seamless conversion
   // from StrictNumeric types.
   template <typename Src>
-  CheckedNumeric(StrictNumeric<Src> value)
+  CheckedNumeric(StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
       : state_(static_cast<Src>(value)) {
   }
 
@@ -190,7 +197,7 @@
   template <typename Src>
   static CheckedNumeric<T> cast(
       const CheckedNumeric<Src>& u,
-      typename std::enable_if<!is_same<Src, T>::value, int>::type = 0) {
+      typename std::enable_if<!std::is_same<Src, T>::value, int>::type = 0) {
     return u;
   }
 
@@ -231,10 +238,9 @@
           lhs.ValueUnsafe() OP rhs.ValueUnsafe(),                             \
           GetRangeConstraint(rhs.validity() | lhs.validity()));               \
     RangeConstraint validity = RANGE_VALID;                                   \
-    T result = static_cast<T>(Checked##NAME(                                  \
-        static_cast<Promotion>(lhs.ValueUnsafe()),                            \
-        static_cast<Promotion>(rhs.ValueUnsafe()),                            \
-        &validity));                                                          \
+    T result = static_cast<T>(                                                \
+        Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()),              \
+                      static_cast<Promotion>(rhs.ValueUnsafe()), &validity)); \
     return CheckedNumeric<Promotion>(                                         \
         result,                                                               \
         GetRangeConstraint(validity | lhs.validity() | rhs.validity()));      \
@@ -260,7 +266,9 @@
         OP CheckedNumeric<Promotion>::cast(rhs);                              \
   }                                                                           \
   /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
-  template <typename T, typename Src>                                         \
+  template <typename T, typename Src,                                         \
+            typename std::enable_if<std::is_arithmetic<Src>::value>::type* =  \
+                nullptr>                                                      \
   CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
       const CheckedNumeric<T>& lhs, Src rhs) {                                \
     typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
@@ -270,8 +278,10 @@
     return CheckedNumeric<Promotion>::cast(lhs)                               \
         OP CheckedNumeric<Promotion>::cast(rhs);                              \
   }                                                                           \
-  /* Binary arithmetic operator for right numeric and left CheckedNumeric. */ \
-  template <typename T, typename Src>                                         \
+  /* Binary arithmetic operator for left numeric and right CheckedNumeric. */ \
+  template <typename T, typename Src,                                         \
+            typename std::enable_if<std::is_arithmetic<Src>::value>::type* =  \
+                nullptr>                                                      \
   CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
       Src lhs, const CheckedNumeric<T>& rhs) {                                \
     typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
diff --git a/base/numerics/safe_math_impl.h b/base/numerics/safe_math_impl.h
index f5ec2b8..f214f3f 100644
--- a/base/numerics/safe_math_impl.h
+++ b/base/numerics/safe_math_impl.h
@@ -8,13 +8,13 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <climits>
 #include <cmath>
 #include <cstdlib>
 #include <limits>
 #include <type_traits>
 
 #include "base/numerics/safe_conversions.h"
-#include "base/template_util.h"
 
 namespace base {
 namespace internal {
@@ -90,7 +90,7 @@
 struct PositionOfSignBit {
   static const typename std::enable_if<std::numeric_limits<Integer>::is_integer,
                                        size_t>::type value =
-      8 * sizeof(Integer) - 1;
+      CHAR_BIT * sizeof(Integer) - 1;
 };
 
 // This is used for UnsignedAbs, where we need to support floating-point
@@ -115,7 +115,7 @@
 // Helper templates for integer manipulations.
 
 template <typename T>
-bool HasSignBit(T x) {
+constexpr bool HasSignBit(T x) {
   // Cast to unsigned since right shift on signed is undefined.
   return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
             PositionOfSignBit<T>::value);
@@ -123,8 +123,8 @@
 
 // This wrapper undoes the standard integer promotions.
 template <typename T>
-T BinaryComplement(T x) {
-  return ~x;
+constexpr T BinaryComplement(T x) {
+  return static_cast<T>(~x);
 }
 
 // Here are the actual portable checked integer math implementations.
@@ -139,15 +139,16 @@
   typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
   UnsignedDst ux = static_cast<UnsignedDst>(x);
   UnsignedDst uy = static_cast<UnsignedDst>(y);
-  UnsignedDst uresult = ux + uy;
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
   // Addition is valid if the sign of (x + y) is equal to either that of x or
   // that of y.
   if (std::numeric_limits<T>::is_signed) {
-    if (HasSignBit(BinaryComplement((uresult ^ ux) & (uresult ^ uy))))
+    if (HasSignBit(BinaryComplement(
+            static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))) {
       *validity = RANGE_VALID;
-    else  // Direction of wrap is inverse of result sign.
+    } else {  // Direction of wrap is inverse of result sign.
       *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
-
+    }
   } else {  // Unsigned is either valid or overflow.
     *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
   }
@@ -162,15 +163,16 @@
   typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
   UnsignedDst ux = static_cast<UnsignedDst>(x);
   UnsignedDst uy = static_cast<UnsignedDst>(y);
-  UnsignedDst uresult = ux - uy;
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
   // Subtraction is valid if either x and y have same sign, or (x-y) and x have
   // the same sign.
   if (std::numeric_limits<T>::is_signed) {
-    if (HasSignBit(BinaryComplement((uresult ^ ux) & (ux ^ uy))))
+    if (HasSignBit(BinaryComplement(
+            static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))) {
       *validity = RANGE_VALID;
-    else  // Direction of wrap is inverse of result sign.
+    } else {  // Direction of wrap is inverse of result sign.
       *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
-
+    }
   } else {  // Unsigned is either valid or underflow.
     *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
   }
@@ -201,7 +203,8 @@
 CheckedMul(T x, T y, RangeConstraint* validity) {
   // If either side is zero then the result will be zero.
   if (!x || !y) {
-    return RANGE_VALID;
+    *validity = RANGE_VALID;
+    return static_cast<T>(0);
 
   } else if (x > 0) {
     if (y > 0)
@@ -220,7 +223,7 @@
           y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
   }
 
-  return x * y;
+  return static_cast<T>(x * y);
 }
 
 template <typename T>
@@ -232,7 +235,7 @@
   *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
                   ? RANGE_VALID
                   : RANGE_OVERFLOW;
-  return x * y;
+  return static_cast<T>(x * y);
 }
 
 // Division just requires a check for an invalid negation on signed min/-1.
@@ -249,7 +252,7 @@
   }
 
   *validity = RANGE_VALID;
-  return x / y;
+  return static_cast<T>(x / y);
 }
 
 template <typename T>
@@ -258,7 +261,7 @@
                         T>::type
 CheckedMod(T x, T y, RangeConstraint* validity) {
   *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
-  return x % y;
+  return static_cast<T>(x % y);
 }
 
 template <typename T>
@@ -267,7 +270,7 @@
                         T>::type
 CheckedMod(T x, T y, RangeConstraint* validity) {
   *validity = RANGE_VALID;
-  return x % y;
+  return static_cast<T>(x % y);
 }
 
 template <typename T>
@@ -278,7 +281,7 @@
   *validity =
       value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
   // The negation of signed min is min, so catch that one.
-  return -value;
+  return static_cast<T>(-value);
 }
 
 template <typename T>
@@ -329,7 +332,7 @@
                         T>::type
 CheckedUnsignedAbs(T value) {
   // T is unsigned, so |value| must already be positive.
-  return value;
+  return static_cast<T>(value);
 }
 
 // These are the floating point stubs that the compiler needs to see. Only the
@@ -339,7 +342,7 @@
   typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type \
       Checked##NAME(T, T, RangeConstraint*) {                         \
     NOTREACHED();                                                     \
-    return 0;                                                         \
+    return static_cast<T>(0);                                         \
   }
 
 BASE_FLOAT_ARITHMETIC_STUBS(Add)
@@ -354,14 +357,14 @@
 typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
     T value,
     RangeConstraint*) {
-  return -value;
+  return static_cast<T>(-value);
 }
 
 template <typename T>
 typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
     T value,
     RangeConstraint*) {
-  return std::abs(value);
+  return static_cast<T>(std::abs(value));
 }
 
 // Floats carry around their validity state with them, but integers do not. So,
@@ -391,7 +394,7 @@
 class CheckedNumericState<T, NUMERIC_INTEGER> {
  private:
   T value_;
-  RangeConstraint validity_;
+  RangeConstraint validity_ : CHAR_BIT;  // Actually requires only two bits.
 
  public:
   template <typename Src, NumericRepresentation type>
@@ -442,7 +445,7 @@
   template <typename Src>
   CheckedNumericState(
       Src value,
-      RangeConstraint /* validity */,
+      RangeConstraint /*validity*/,
       typename std::enable_if<std::numeric_limits<Src>::is_integer, int>::type =
           0) {
     switch (DstRangeRelationToSrcRange<T>(value)) {
@@ -486,27 +489,16 @@
   T value() const { return value_; }
 };
 
-// For integers less than 128-bit and floats 32-bit or larger, we can distil
-// C/C++ arithmetic promotions down to two simple rules:
-// 1. The type with the larger maximum exponent always takes precedence.
-// 2. The resulting type must be promoted to at least an int.
-// The following template specializations implement that promotion logic.
-enum ArithmeticPromotionCategory {
-  LEFT_PROMOTION,
-  RIGHT_PROMOTION,
-  DEFAULT_PROMOTION
-};
+// For integers less than 128-bit and floats 32-bit or larger, we have the type
+// with the larger maximum exponent take precedence.
+enum ArithmeticPromotionCategory { LEFT_PROMOTION, RIGHT_PROMOTION };
 
 template <typename Lhs,
           typename Rhs = Lhs,
           ArithmeticPromotionCategory Promotion =
               (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
-                  ? (MaxExponent<Lhs>::value > MaxExponent<int>::value
-                         ? LEFT_PROMOTION
-                         : DEFAULT_PROMOTION)
-                  : (MaxExponent<Rhs>::value > MaxExponent<int>::value
-                         ? RIGHT_PROMOTION
-                         : DEFAULT_PROMOTION) >
+                  ? LEFT_PROMOTION
+                  : RIGHT_PROMOTION>
 struct ArithmeticPromotion;
 
 template <typename Lhs, typename Rhs>
@@ -519,11 +511,6 @@
   typedef Rhs type;
 };
 
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<Lhs, Rhs, DEFAULT_PROMOTION> {
-  typedef int type;
-};
-
 // We can statically check if operations on the provided types can wrap, so we
 // can skip the checked operations if they're not needed. So, for an integer we
 // care if the destination type preserves the sign and is twice the width of
diff --git a/base/numerics/safe_numerics_unittest.cc b/base/numerics/safe_numerics_unittest.cc
index cb63ad0..4be7ab5 100644
--- a/base/numerics/safe_numerics_unittest.cc
+++ b/base/numerics/safe_numerics_unittest.cc
@@ -11,7 +11,6 @@
 #include "base/compiler_specific.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/numerics/safe_math.h"
-#include "base/template_util.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -64,10 +63,13 @@
 
 // Helper macros to wrap displaying the conversion types and line numbers.
 #define TEST_EXPECTED_VALIDITY(expected, actual)                           \
-  EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).validity())              \
+  EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).IsValid())               \
       << "Result test: Value " << +(actual).ValueUnsafe() << " as " << dst \
       << " on line " << line;
 
+#define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
+#define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
+
 #define TEST_EXPECTED_VALUE(expected, actual)                                \
   EXPECT_EQ(static_cast<Dst>(expected),                                      \
             CheckedNumeric<Dst>(actual).ValueUnsafe())                       \
@@ -83,43 +85,32 @@
                                 numeric_limits<Dst>::is_signed,
                             int>::type = 0) {
   typedef numeric_limits<Dst> DstLimits;
-  TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
-                         -CheckedNumeric<Dst>(DstLimits::min()));
-  TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()).Abs());
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
 
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::max()) + -1);
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) + -1);
-  TEST_EXPECTED_VALIDITY(
-      RANGE_UNDERFLOW,
-      CheckedNumeric<Dst>(-DstLimits::max()) + -DstLimits::max());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+                        -DstLimits::max());
 
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) - 1);
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()) - -1);
-  TEST_EXPECTED_VALIDITY(
-      RANGE_OVERFLOW,
-      CheckedNumeric<Dst>(DstLimits::max()) - -DstLimits::max());
-  TEST_EXPECTED_VALIDITY(
-      RANGE_UNDERFLOW,
-      CheckedNumeric<Dst>(-DstLimits::max()) - DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) - -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+                        -DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+                        DstLimits::max());
 
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) * 2);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * 2);
 
-  TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) / -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) / -1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
 
   // Modulus is legal only for integers.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
   TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
-  TEST_EXPECTED_VALIDITY(RANGE_INVALID, CheckedNumeric<Dst>(-1) % -2);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-1) % -2);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
   // Test all the different modulus combinations.
@@ -139,19 +130,14 @@
                                 !numeric_limits<Dst>::is_signed,
                             int>::type = 0) {
   typedef numeric_limits<Dst> DstLimits;
-  TEST_EXPECTED_VALIDITY(RANGE_VALID, -CheckedNumeric<Dst>(DstLimits::min()));
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()).Abs());
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) + -1);
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) - 1);
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) * 2);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
-  TEST_EXPECTED_VALIDITY(
-      RANGE_VALID,
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
+  TEST_EXPECTED_SUCCESS(
       CheckedNumeric<typename SignedIntegerForSize<Dst>::type>(
           std::numeric_limits<typename SignedIntegerForSize<Dst>::type>::min())
           .UnsignedAbs());
@@ -177,29 +163,22 @@
     int line,
     typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
   typedef numeric_limits<Dst> DstLimits;
-  TEST_EXPECTED_VALIDITY(RANGE_VALID, -CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
 
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
 
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()) + -1);
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::max()) + 1);
-  TEST_EXPECTED_VALIDITY(
-      RANGE_UNDERFLOW,
-      CheckedNumeric<Dst>(-DstLimits::max()) + -DstLimits::max());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+                        -DstLimits::max());
 
-  TEST_EXPECTED_VALIDITY(
-      RANGE_OVERFLOW,
-      CheckedNumeric<Dst>(DstLimits::max()) - -DstLimits::max());
-  TEST_EXPECTED_VALIDITY(
-      RANGE_UNDERFLOW,
-      CheckedNumeric<Dst>(-DstLimits::max()) - DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+                        -DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+                        DstLimits::max());
 
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()) * 2);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) * 2);
 
   TEST_EXPECTED_VALUE(-0.5, CheckedNumeric<Dst>(-1.0) / 2);
   EXPECT_EQ(static_cast<Dst>(1.0), CheckedNumeric<Dst>(1.0).ValueFloating());
@@ -259,17 +238,15 @@
   TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>() + 1));
   TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()) + 1);
-  TEST_EXPECTED_VALIDITY(
-      RANGE_OVERFLOW, CheckedNumeric<Dst>(DstLimits::max()) + DstLimits::max());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
+                        DstLimits::max());
 
   // Generic subtraction.
   TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(1) - 1));
   TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::max()) - 1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) - 1);
 
   // Generic multiplication.
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>() * 1));
@@ -278,8 +255,8 @@
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * 0));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
-  TEST_EXPECTED_VALIDITY(
-      RANGE_OVERFLOW, CheckedNumeric<Dst>(DstLimits::max()) * DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+                        DstLimits::max());
 
   // Generic division.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
@@ -350,18 +327,18 @@
                   "Comparison must be sign preserving and value preserving");
 
     const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
-    TEST_EXPECTED_VALIDITY(RANGE_VALID, checked_dst);
+    TEST_EXPECTED_SUCCESS(checked_dst);
     if (MaxExponent<Dst>::value > MaxExponent<Src>::value) {
       if (MaxExponent<Dst>::value >= MaxExponent<Src>::value * 2 - 1) {
         // At least twice larger type.
-        TEST_EXPECTED_VALIDITY(RANGE_VALID, SrcLimits::max() * checked_dst);
+        TEST_EXPECTED_SUCCESS(SrcLimits::max() * checked_dst);
 
       } else {  // Larger, but not at least twice as large.
-        TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, SrcLimits::max() * checked_dst);
-        TEST_EXPECTED_VALIDITY(RANGE_VALID, checked_dst + 1);
+        TEST_EXPECTED_FAILURE(SrcLimits::max() * checked_dst);
+        TEST_EXPECTED_SUCCESS(checked_dst + 1);
       }
     } else {  // Same width type.
-      TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + 1);
+      TEST_EXPECTED_FAILURE(checked_dst + 1);
     }
 
     TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
@@ -390,9 +367,9 @@
                   "Destination must be narrower than source");
 
     const CheckedNumeric<Dst> checked_dst;
-    TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst - SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst - SrcLimits::max());
 
     TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
     TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
@@ -419,7 +396,7 @@
       TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
       TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
     } else {
-      TEST_EXPECTED_VALIDITY(RANGE_INVALID, checked_dst - static_cast<Src>(1));
+      TEST_EXPECTED_FAILURE(checked_dst - static_cast<Src>(1));
       TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
     }
   }
@@ -437,8 +414,8 @@
 
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + static_cast<Src>(-1));
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + -SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
 
     TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
     TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
@@ -460,9 +437,9 @@
 
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
-    TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + static_cast<Src>(-1));
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + -SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
 
     TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
     TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
@@ -502,7 +479,7 @@
 
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
-    TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
     TEST_EXPECTED_VALUE(SrcLimits::min(), checked_dst + SrcLimits::min());
 
     TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
diff --git a/base/observer_list.h b/base/observer_list.h
index 3156421..afe1f46 100644
--- a/base/observer_list.h
+++ b/base/observer_list.h
@@ -236,8 +236,8 @@
 #define FOR_EACH_OBSERVER(ObserverType, observer_list, func)             \
   do {                                                                   \
     if ((observer_list).might_have_observers()) {                        \
-      base::ObserverListBase<ObserverType>::Iterator it_inside_observer_macro( \
-          &observer_list);                                               \
+      typename base::ObserverListBase<ObserverType>::Iterator            \
+          it_inside_observer_macro(&observer_list);                      \
       ObserverType* obs;                                                 \
       while ((obs = it_inside_observer_macro.GetNext()) != nullptr)      \
         obs->func;                                                       \
diff --git a/base/observer_list_threadsafe.h b/base/observer_list_threadsafe.h
index 6154ae9..fe78354 100644
--- a/base/observer_list_threadsafe.h
+++ b/base/observer_list_threadsafe.h
@@ -7,6 +7,7 @@
 
 #include <algorithm>
 #include <map>
+#include <tuple>
 
 #include "base/bind.h"
 #include "base/location.h"
@@ -17,8 +18,8 @@
 #include "base/observer_list.h"
 #include "base/single_thread_task_runner.h"
 #include "base/stl_util.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 ///////////////////////////////////////////////////////////////////////////////
 //
@@ -177,8 +178,8 @@
   void Notify(const tracked_objects::Location& from_here,
               Method m,
               const Params&... params) {
-    internal::UnboundMethod<ObserverType, Method, Tuple<Params...>> method(
-        m, MakeTuple(params...));
+    internal::UnboundMethod<ObserverType, Method, std::tuple<Params...>> method(
+        m, std::make_tuple(params...));
 
     AutoLock lock(list_lock_);
     for (const auto& entry : observer_lists_) {
@@ -186,8 +187,8 @@
       context->task_runner->PostTask(
           from_here,
           Bind(&ObserverListThreadSafe<ObserverType>::template NotifyWrapper<
-                  Method, Tuple<Params...>>,
-              this, context, method));
+                   Method, std::tuple<Params...>>,
+               this, context, method));
     }
   }
 
diff --git a/base/observer_list_unittest.cc b/base/observer_list_unittest.cc
index 2e51e45..097a2ed 100644
--- a/base/observer_list_unittest.cc
+++ b/base/observer_list_unittest.cc
@@ -111,7 +111,7 @@
     loop_->task_runner()->PostTask(
         FROM_HERE,
         base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
-    loop_->Run();
+    RunLoop().Run();
     //LOG(ERROR) << "Loop 0x" << std::hex << loop_ << " done. " <<
     //    count_observes_ << ", " << count_addtask_;
     delete loop_;
diff --git a/base/optional.h b/base/optional.h
new file mode 100644
index 0000000..b468964
--- /dev/null
+++ b/base/optional.h
@@ -0,0 +1,457 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OPTIONAL_H_
+#define BASE_OPTIONAL_H_
+
+#include <type_traits>
+
+#include "base/logging.h"
+#include "base/memory/aligned_memory.h"
+#include "base/template_util.h"
+
+namespace base {
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
+struct in_place_t {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
+struct nullopt_t {
+  constexpr explicit nullopt_t(int) {}
+};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place
+constexpr in_place_t in_place = {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt
+constexpr nullopt_t nullopt(0);
+
+namespace internal {
+
+template <typename T, bool = base::is_trivially_destructible<T>::value>
+struct OptionalStorage {
+  // When T is not trivially destructible we must call its
+  // destructor before deallocating its memory.
+  ~OptionalStorage() {
+    if (!is_null_)
+      buffer_.template data_as<T>()->~T();
+  }
+
+  bool is_null_ = true;
+  base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+};
+
+template <typename T>
+struct OptionalStorage<T, true> {
+  // When T is trivially destructible (i.e. its destructor does nothing)
+  // there is no need to call it.
+  // Since |base::AlignedMemory| is just an array its destructor
+  // is trivial. Explicitly defaulting the destructor means it's not
+  // user-provided. All of this together make this destructor trivial.
+  ~OptionalStorage() = default;
+
+  bool is_null_ = true;
+  base::AlignedMemory<sizeof(T), ALIGNOF(T)> buffer_;
+};
+
+}  // namespace internal
+
+// base::Optional is a Chromium version of the C++17 optional class:
+// std::optional documentation:
+// http://en.cppreference.com/w/cpp/utility/optional
+// Chromium documentation:
+// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
+//
+// These are the differences between the specification and the implementation:
+// - The constructor and emplace method using initializer_list are not
+//   implemented because 'initializer_list' is banned from Chromium.
+// - Constructors do not use 'constexpr' as it is a C++14 extension.
+// - 'constexpr' might be missing in some places for reasons specified locally.
+// - No exceptions are thrown, because they are banned from Chromium.
+// - All the non-members are in the 'base' namespace instead of 'std'.
+template <typename T>
+class Optional {
+ public:
+  using value_type = T;
+
+  constexpr Optional() = default;
+  Optional(base::nullopt_t) : Optional() {}
+
+  Optional(const Optional& other) {
+    if (!other.storage_.is_null_)
+      Init(other.value());
+  }
+
+  Optional(Optional&& other) {
+    if (!other.storage_.is_null_)
+      Init(std::move(other.value()));
+  }
+
+  Optional(const T& value) { Init(value); }
+
+  Optional(T&& value) { Init(std::move(value)); }
+
+  template <class... Args>
+  explicit Optional(base::in_place_t, Args&&... args) {
+    emplace(std::forward<Args>(args)...);
+  }
+
+  ~Optional() = default;
+
+  Optional& operator=(base::nullopt_t) {
+    FreeIfNeeded();
+    return *this;
+  }
+
+  Optional& operator=(const Optional& other) {
+    if (other.storage_.is_null_) {
+      FreeIfNeeded();
+      return *this;
+    }
+
+    InitOrAssign(other.value());
+    return *this;
+  }
+
+  Optional& operator=(Optional&& other) {
+    if (other.storage_.is_null_) {
+      FreeIfNeeded();
+      return *this;
+    }
+
+    InitOrAssign(std::move(other.value()));
+    return *this;
+  }
+
+  template <class U>
+  typename std::enable_if<std::is_same<std::decay<U>, T>::value,
+                          Optional&>::type
+  operator=(U&& value) {
+    InitOrAssign(std::forward<U>(value));
+    return *this;
+  }
+
+  // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+  const T* operator->() const {
+    DCHECK(!storage_.is_null_);
+    return &value();
+  }
+
+  // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+  // meant to be 'constexpr const'.
+  T* operator->() {
+    DCHECK(!storage_.is_null_);
+    return &value();
+  }
+
+  constexpr const T& operator*() const& { return value(); }
+
+  // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+  // meant to be 'constexpr const'.
+  T& operator*() & { return value(); }
+
+  constexpr const T&& operator*() const&& { return std::move(value()); }
+
+  // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+  // meant to be 'constexpr const'.
+  T&& operator*() && { return std::move(value()); }
+
+  constexpr explicit operator bool() const { return !storage_.is_null_; }
+
+  // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+  // meant to be 'constexpr const'.
+  T& value() & {
+    DCHECK(!storage_.is_null_);
+    return *storage_.buffer_.template data_as<T>();
+  }
+
+  // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+  const T& value() const& {
+    DCHECK(!storage_.is_null_);
+    return *storage_.buffer_.template data_as<T>();
+  }
+
+  // TODO(mlamouri): using 'constexpr' here breaks compiler that assume it was
+  // meant to be 'constexpr const'.
+  T&& value() && {
+    DCHECK(!storage_.is_null_);
+    return std::move(*storage_.buffer_.template data_as<T>());
+  }
+
+  // TODO(mlamouri): can't use 'constexpr' with DCHECK.
+  const T&& value() const&& {
+    DCHECK(!storage_.is_null_);
+    return std::move(*storage_.buffer_.template data_as<T>());
+  }
+
+  template <class U>
+  constexpr T value_or(U&& default_value) const& {
+    // TODO(mlamouri): add the following assert when possible:
+    // static_assert(std::is_copy_constructible<T>::value,
+    //               "T must be copy constructible");
+    static_assert(std::is_convertible<U, T>::value,
+                  "U must be convertible to T");
+    return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+                             : value();
+  }
+
+  template <class U>
+  T value_or(U&& default_value) && {
+    // TODO(mlamouri): add the following assert when possible:
+    // static_assert(std::is_move_constructible<T>::value,
+    //               "T must be move constructible");
+    static_assert(std::is_convertible<U, T>::value,
+                  "U must be convertible to T");
+    return storage_.is_null_ ? static_cast<T>(std::forward<U>(default_value))
+                             : std::move(value());
+  }
+
+  void swap(Optional& other) {
+    if (storage_.is_null_ && other.storage_.is_null_)
+      return;
+
+    if (storage_.is_null_ != other.storage_.is_null_) {
+      if (storage_.is_null_) {
+        Init(std::move(*other.storage_.buffer_.template data_as<T>()));
+        other.FreeIfNeeded();
+      } else {
+        other.Init(std::move(*storage_.buffer_.template data_as<T>()));
+        FreeIfNeeded();
+      }
+      return;
+    }
+
+    DCHECK(!storage_.is_null_ && !other.storage_.is_null_);
+    using std::swap;
+    swap(**this, *other);
+  }
+
+  template <class... Args>
+  void emplace(Args&&... args) {
+    FreeIfNeeded();
+    Init(std::forward<Args>(args)...);
+  }
+
+ private:
+  void Init(const T& value) {
+    DCHECK(storage_.is_null_);
+    new (storage_.buffer_.void_data()) T(value);
+    storage_.is_null_ = false;
+  }
+
+  void Init(T&& value) {
+    DCHECK(storage_.is_null_);
+    new (storage_.buffer_.void_data()) T(std::move(value));
+    storage_.is_null_ = false;
+  }
+
+  template <class... Args>
+  void Init(Args&&... args) {
+    DCHECK(storage_.is_null_);
+    new (storage_.buffer_.void_data()) T(std::forward<Args>(args)...);
+    storage_.is_null_ = false;
+  }
+
+  void InitOrAssign(const T& value) {
+    if (storage_.is_null_)
+      Init(value);
+    else
+      *storage_.buffer_.template data_as<T>() = value;
+  }
+
+  void InitOrAssign(T&& value) {
+    if (storage_.is_null_)
+      Init(std::move(value));
+    else
+      *storage_.buffer_.template data_as<T>() = std::move(value);
+  }
+
+  void FreeIfNeeded() {
+    if (storage_.is_null_)
+      return;
+    storage_.buffer_.template data_as<T>()->~T();
+    storage_.is_null_ = true;
+  }
+
+  internal::OptionalStorage<T> storage_;
+};
+
+template <class T>
+constexpr bool operator==(const Optional<T>& lhs, const Optional<T>& rhs) {
+  return !!lhs != !!rhs ? false : lhs == nullopt || (*lhs == *rhs);
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& lhs, const Optional<T>& rhs) {
+  return !(lhs == rhs);
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& lhs, const Optional<T>& rhs) {
+  return rhs == nullopt ? false : (lhs == nullopt ? true : *lhs < *rhs);
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& lhs, const Optional<T>& rhs) {
+  return !(rhs < lhs);
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& lhs, const Optional<T>& rhs) {
+  return rhs < lhs;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& lhs, const Optional<T>& rhs) {
+  return !(lhs < rhs);
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, base::nullopt_t) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator==(base::nullopt_t, const Optional<T>& opt) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, base::nullopt_t) {
+  return !!opt;
+}
+
+template <class T>
+constexpr bool operator!=(base::nullopt_t, const Optional<T>& opt) {
+  return !!opt;
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, base::nullopt_t) {
+  return false;
+}
+
+template <class T>
+constexpr bool operator<(base::nullopt_t, const Optional<T>& opt) {
+  return !!opt;
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, base::nullopt_t) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator<=(base::nullopt_t, const Optional<T>& opt) {
+  return true;
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, base::nullopt_t) {
+  return !!opt;
+}
+
+template <class T>
+constexpr bool operator>(base::nullopt_t, const Optional<T>& opt) {
+  return false;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, base::nullopt_t) {
+  return true;
+}
+
+template <class T>
+constexpr bool operator>=(base::nullopt_t, const Optional<T>& opt) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, const T& value) {
+  return opt != nullopt ? *opt == value : false;
+}
+
+template <class T>
+constexpr bool operator==(const T& value, const Optional<T>& opt) {
+  return opt == value;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, const T& value) {
+  return !(opt == value);
+}
+
+template <class T>
+constexpr bool operator!=(const T& value, const Optional<T>& opt) {
+  return !(opt == value);
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>& opt, const T& value) {
+  return opt != nullopt ? *opt < value : true;
+}
+
+template <class T>
+constexpr bool operator<(const T& value, const Optional<T>& opt) {
+  return opt != nullopt ? value < *opt : false;
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, const T& value) {
+  return !(opt > value);
+}
+
+template <class T>
+constexpr bool operator<=(const T& value, const Optional<T>& opt) {
+  return !(value > opt);
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, const T& value) {
+  return value < opt;
+}
+
+template <class T>
+constexpr bool operator>(const T& value, const Optional<T>& opt) {
+  return opt < value;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>& opt, const T& value) {
+  return !(opt < value);
+}
+
+template <class T>
+constexpr bool operator>=(const T& value, const Optional<T>& opt) {
+  return !(value < opt);
+}
+
+template <class T>
+constexpr Optional<typename std::decay<T>::type> make_optional(T&& value) {
+  return Optional<typename std::decay<T>::type>(std::forward<T>(value));
+}
+
+template <class T>
+void swap(Optional<T>& lhs, Optional<T>& rhs) {
+  lhs.swap(rhs);
+}
+
+}  // namespace base
+
+namespace std {
+
+template <class T>
+struct hash<base::Optional<T>> {
+  size_t operator()(const base::Optional<T>& opt) const {
+    return opt == base::nullopt ? 0 : std::hash<T>()(*opt);
+  }
+};
+
+}  // namespace std
+
+#endif  // BASE_OPTIONAL_H_
diff --git a/base/optional_unittest.cc b/base/optional_unittest.cc
new file mode 100644
index 0000000..d6bf263
--- /dev/null
+++ b/base/optional_unittest.cc
@@ -0,0 +1,1301 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/optional.h"
+
+#include <set>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Object used to test complex object with Optional<T> in addition of the move
+// semantics.
+class TestObject {
+ public:
+  enum class State {
+    DEFAULT_CONSTRUCTED,
+    VALUE_CONSTRUCTED,
+    COPY_CONSTRUCTED,
+    MOVE_CONSTRUCTED,
+    MOVED_FROM,
+    COPY_ASSIGNED,
+    MOVE_ASSIGNED,
+    SWAPPED,
+  };
+
+  TestObject() : foo_(0), bar_(0.0), state_(State::DEFAULT_CONSTRUCTED) {}
+
+  TestObject(int foo, double bar)
+      : foo_(foo), bar_(bar), state_(State::VALUE_CONSTRUCTED) {}
+
+  TestObject(const TestObject& other)
+      : foo_(other.foo_), bar_(other.bar_), state_(State::COPY_CONSTRUCTED) {}
+
+  TestObject(TestObject&& other)
+      : foo_(std::move(other.foo_)),
+        bar_(std::move(other.bar_)),
+        state_(State::MOVE_CONSTRUCTED) {
+    other.state_ = State::MOVED_FROM;
+  }
+
+  TestObject& operator=(const TestObject& other) {
+    foo_ = other.foo_;
+    bar_ = other.bar_;
+    state_ = State::COPY_ASSIGNED;
+    return *this;
+  }
+
+  TestObject& operator=(TestObject&& other) {
+    foo_ = other.foo_;
+    bar_ = other.bar_;
+    state_ = State::MOVE_ASSIGNED;
+    other.state_ = State::MOVED_FROM;
+    return *this;
+  }
+
+  void Swap(TestObject* other) {
+    using std::swap;
+    swap(foo_, other->foo_);
+    swap(bar_, other->bar_);
+    state_ = State::SWAPPED;
+    other->state_ = State::SWAPPED;
+  }
+
+  bool operator==(const TestObject& other) const {
+    return foo_ == other.foo_ && bar_ == other.bar_;
+  }
+
+  int foo() const { return foo_; }
+  State state() const { return state_; }
+
+ private:
+  int foo_;
+  double bar_;
+  State state_;
+};
+
+// Implementing Swappable concept.
+void swap(TestObject& lhs, TestObject& rhs) {
+  lhs.Swap(&rhs);
+}
+
+class NonTriviallyDestructible {
+  ~NonTriviallyDestructible() {}
+};
+
+}  // anonymous namespace
+
+static_assert(is_trivially_destructible<Optional<int>>::value,
+              "OptionalIsTriviallyDestructible");
+
+static_assert(
+    !is_trivially_destructible<Optional<NonTriviallyDestructible>>::value,
+    "OptionalIsTriviallyDestructible");
+
+TEST(OptionalTest, DefaultConstructor) {
+  {
+    Optional<float> o;
+    EXPECT_FALSE(o);
+  }
+
+  {
+    Optional<std::string> o;
+    EXPECT_FALSE(o);
+  }
+
+  {
+    Optional<TestObject> o;
+    EXPECT_FALSE(o);
+  }
+}
+
+TEST(OptionalTest, CopyConstructor) {
+  {
+    Optional<float> first(0.1f);
+    Optional<float> other(first);
+
+    EXPECT_TRUE(other);
+    EXPECT_EQ(other.value(), 0.1f);
+    EXPECT_EQ(first, other);
+  }
+
+  {
+    Optional<std::string> first("foo");
+    Optional<std::string> other(first);
+
+    EXPECT_TRUE(other);
+    EXPECT_EQ(other.value(), "foo");
+    EXPECT_EQ(first, other);
+  }
+
+  {
+    Optional<TestObject> first(TestObject(3, 0.1));
+    Optional<TestObject> other(first);
+
+    EXPECT_TRUE(!!other);
+    EXPECT_TRUE(other.value() == TestObject(3, 0.1));
+    EXPECT_TRUE(first == other);
+  }
+}
+
+TEST(OptionalTest, ValueConstructor) {
+  {
+    Optional<float> o(0.1f);
+    EXPECT_TRUE(o);
+    EXPECT_EQ(o.value(), 0.1f);
+  }
+
+  {
+    Optional<std::string> o("foo");
+    EXPECT_TRUE(o);
+    EXPECT_EQ(o.value(), "foo");
+  }
+
+  {
+    Optional<TestObject> o(TestObject(3, 0.1));
+    EXPECT_TRUE(!!o);
+    EXPECT_TRUE(o.value() == TestObject(3, 0.1));
+  }
+}
+
+TEST(OptionalTest, MoveConstructor) {
+  {
+    Optional<float> first(0.1f);
+    Optional<float> second(std::move(first));
+
+    EXPECT_TRUE(second);
+    EXPECT_EQ(second.value(), 0.1f);
+
+    EXPECT_TRUE(first);
+  }
+
+  {
+    Optional<std::string> first("foo");
+    Optional<std::string> second(std::move(first));
+
+    EXPECT_TRUE(second);
+    EXPECT_EQ("foo", second.value());
+
+    EXPECT_TRUE(first);
+  }
+
+  {
+    Optional<TestObject> first(TestObject(3, 0.1));
+    Optional<TestObject> second(std::move(first));
+
+    EXPECT_TRUE(!!second);
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
+    EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+
+    EXPECT_TRUE(!!first);
+    EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+  }
+}
+
+TEST(OptionalTest, MoveValueConstructor) {
+  {
+    Optional<float> first(0.1f);
+    Optional<float> second(std::move(first.value()));
+
+    EXPECT_TRUE(second);
+    EXPECT_EQ(second.value(), 0.1f);
+
+    EXPECT_TRUE(first);
+  }
+
+  {
+    Optional<std::string> first("foo");
+    Optional<std::string> second(std::move(first.value()));
+
+    EXPECT_TRUE(second);
+    EXPECT_EQ("foo", second.value());
+
+    EXPECT_TRUE(first);
+  }
+
+  {
+    Optional<TestObject> first(TestObject(3, 0.1));
+    Optional<TestObject> second(std::move(first.value()));
+
+    EXPECT_TRUE(!!second);
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
+    EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+
+    EXPECT_TRUE(!!first);
+    EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+  }
+}
+
+TEST(OptionalTest, ConstructorForwardArguments) {
+  {
+    Optional<float> a(base::in_place, 0.1f);
+    EXPECT_TRUE(a);
+    EXPECT_EQ(0.1f, a.value());
+  }
+
+  {
+    Optional<std::string> a(base::in_place, "foo");
+    EXPECT_TRUE(a);
+    EXPECT_EQ("foo", a.value());
+  }
+
+  {
+    Optional<TestObject> a(base::in_place, 0, 0.1);
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(TestObject(0, 0.1) == a.value());
+  }
+}
+
+TEST(OptionalTest, NulloptConstructor) {
+  Optional<int> a = base::nullopt;
+  EXPECT_FALSE(a);
+}
+
+TEST(OptionalTest, AssignValue) {
+  {
+    Optional<float> a;
+    EXPECT_FALSE(a);
+    a = 0.1f;
+    EXPECT_TRUE(a);
+
+    Optional<float> b(0.1f);
+    EXPECT_TRUE(a == b);
+  }
+
+  {
+    Optional<std::string> a;
+    EXPECT_FALSE(a);
+    a = std::string("foo");
+    EXPECT_TRUE(a);
+
+    Optional<std::string> b(std::string("foo"));
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<TestObject> a;
+    EXPECT_FALSE(!!a);
+    a = TestObject(3, 0.1);
+    EXPECT_TRUE(!!a);
+
+    Optional<TestObject> b(TestObject(3, 0.1));
+    EXPECT_TRUE(a == b);
+  }
+
+  {
+    Optional<TestObject> a = TestObject(4, 1.0);
+    EXPECT_TRUE(!!a);
+    a = TestObject(3, 0.1);
+    EXPECT_TRUE(!!a);
+
+    Optional<TestObject> b(TestObject(3, 0.1));
+    EXPECT_TRUE(a == b);
+  }
+}
+
+TEST(OptionalTest, AssignObject) {
+  {
+    Optional<float> a;
+    Optional<float> b(0.1f);
+    a = b;
+
+    EXPECT_TRUE(a);
+    EXPECT_EQ(a.value(), 0.1f);
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<std::string> a;
+    Optional<std::string> b("foo");
+    a = b;
+
+    EXPECT_TRUE(a);
+    EXPECT_EQ(a.value(), "foo");
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<TestObject> a;
+    Optional<TestObject> b(TestObject(3, 0.1));
+    a = b;
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+    EXPECT_TRUE(a == b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(4, 1.0));
+    Optional<TestObject> b(TestObject(3, 0.1));
+    a = b;
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(a.value() == TestObject(3, 0.1));
+    EXPECT_TRUE(a == b);
+  }
+}
+
+TEST(OptionalTest, AssignObject_rvalue) {
+  {
+    Optional<float> a;
+    Optional<float> b(0.1f);
+    a = std::move(b);
+
+    EXPECT_TRUE(a);
+    EXPECT_TRUE(b);
+    EXPECT_EQ(0.1f, a.value());
+  }
+
+  {
+    Optional<std::string> a;
+    Optional<std::string> b("foo");
+    a = std::move(b);
+
+    EXPECT_TRUE(a);
+    EXPECT_TRUE(b);
+    EXPECT_EQ("foo", a.value());
+  }
+
+  {
+    Optional<TestObject> a;
+    Optional<TestObject> b(TestObject(3, 0.1));
+    a = std::move(b);
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, a->state());
+    EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+  }
+
+  {
+    Optional<TestObject> a(TestObject(4, 1.0));
+    Optional<TestObject> b(TestObject(3, 0.1));
+    a = std::move(b);
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(!!b);
+    EXPECT_TRUE(TestObject(3, 0.1) == a.value());
+
+    EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, a->state());
+    EXPECT_EQ(TestObject::State::MOVED_FROM, b->state());
+  }
+}
+
+TEST(OptionalTest, AssignNull) {
+  {
+    Optional<float> a(0.1f);
+    Optional<float> b(0.2f);
+    a = base::nullopt;
+    b = base::nullopt;
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    Optional<std::string> b("bar");
+    a = base::nullopt;
+    b = base::nullopt;
+    EXPECT_EQ(a, b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    Optional<TestObject> b(TestObject(4, 1.0));
+    a = base::nullopt;
+    b = base::nullopt;
+    EXPECT_TRUE(a == b);
+  }
+}
+
+TEST(OptionalTest, OperatorStar) {
+  {
+    Optional<float> a(0.1f);
+    EXPECT_EQ(a.value(), *a);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    EXPECT_EQ(a.value(), *a);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    EXPECT_EQ(a.value(), *a);
+  }
+}
+
+TEST(OptionalTest, OperatorStar_rvalue) {
+  EXPECT_EQ(0.1f, *Optional<float>(0.1f));
+  EXPECT_EQ(std::string("foo"), *Optional<std::string>("foo"));
+  EXPECT_TRUE(TestObject(3, 0.1) == *Optional<TestObject>(TestObject(3, 0.1)));
+}
+
+TEST(OptionalTest, OperatorArrow) {
+  Optional<TestObject> a(TestObject(3, 0.1));
+  EXPECT_EQ(a->foo(), 3);
+}
+
+TEST(OptionalTest, Value_rvalue) {
+  EXPECT_EQ(0.1f, Optional<float>(0.1f).value());
+  EXPECT_EQ(std::string("foo"), Optional<std::string>("foo").value());
+  EXPECT_TRUE(TestObject(3, 0.1) ==
+              Optional<TestObject>(TestObject(3, 0.1)).value());
+}
+
+TEST(OptionalTest, ValueOr) {
+  {
+    Optional<float> a;
+    EXPECT_EQ(0.0f, a.value_or(0.0f));
+
+    a = 0.1f;
+    EXPECT_EQ(0.1f, a.value_or(0.0f));
+
+    a = base::nullopt;
+    EXPECT_EQ(0.0f, a.value_or(0.0f));
+  }
+
+  {
+    Optional<std::string> a;
+    EXPECT_EQ("bar", a.value_or("bar"));
+
+    a = std::string("foo");
+    EXPECT_EQ(std::string("foo"), a.value_or("bar"));
+
+    a = base::nullopt;
+    EXPECT_EQ(std::string("bar"), a.value_or("bar"));
+  }
+
+  {
+    Optional<TestObject> a;
+    EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
+
+    a = TestObject(3, 0.1);
+    EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(3, 0.1));
+
+    a = base::nullopt;
+    EXPECT_TRUE(a.value_or(TestObject(1, 0.3)) == TestObject(1, 0.3));
+  }
+}
+
+TEST(OptionalTest, Swap_bothNoValue) {
+  Optional<TestObject> a, b;
+  a.swap(b);
+
+  EXPECT_FALSE(a);
+  EXPECT_FALSE(b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_inHasValue) {
+  Optional<TestObject> a(TestObject(1, 0.3));
+  Optional<TestObject> b;
+  a.swap(b);
+
+  EXPECT_FALSE(a);
+
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(1, 0.3) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_outHasValue) {
+  Optional<TestObject> a;
+  Optional<TestObject> b(TestObject(1, 0.3));
+  a.swap(b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_FALSE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, Swap_bothValue) {
+  Optional<TestObject> a(TestObject(0, 0.1));
+  Optional<TestObject> b(TestObject(1, 0.3));
+  a.swap(b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+  EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+  EXPECT_EQ(TestObject::State::SWAPPED, b->state());
+}
+
+TEST(OptionalTest, Emplace) {
+  {
+    Optional<float> a(0.1f);
+    a.emplace(0.3f);
+
+    EXPECT_TRUE(a);
+    EXPECT_EQ(0.3f, a.value());
+  }
+
+  {
+    Optional<std::string> a("foo");
+    a.emplace("bar");
+
+    EXPECT_TRUE(a);
+    EXPECT_EQ("bar", a.value());
+  }
+
+  {
+    Optional<TestObject> a(TestObject(0, 0.1));
+    a.emplace(TestObject(1, 0.2));
+
+    EXPECT_TRUE(!!a);
+    EXPECT_TRUE(TestObject(1, 0.2) == a.value());
+  }
+}
+
+TEST(OptionalTest, Equals_TwoEmpty) {
+  Optional<int> a;
+  Optional<int> b;
+
+  EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, Equals_TwoEquals) {
+  Optional<int> a(1);
+  Optional<int> b(1);
+
+  EXPECT_TRUE(a == b);
+}
+
+TEST(OptionalTest, Equals_OneEmpty) {
+  Optional<int> a;
+  Optional<int> b(1);
+
+  EXPECT_FALSE(a == b);
+}
+
+TEST(OptionalTest, Equals_TwoDifferent) {
+  Optional<int> a(0);
+  Optional<int> b(1);
+
+  EXPECT_FALSE(a == b);
+}
+
+TEST(OptionalTest, NotEquals_TwoEmpty) {
+  Optional<int> a;
+  Optional<int> b;
+
+  EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_TwoEquals) {
+  Optional<int> a(1);
+  Optional<int> b(1);
+
+  EXPECT_FALSE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_OneEmpty) {
+  Optional<int> a;
+  Optional<int> b(1);
+
+  EXPECT_TRUE(a != b);
+}
+
+TEST(OptionalTest, NotEquals_TwoDifferent) {
+  Optional<int> a(0);
+  Optional<int> b(1);
+
+  EXPECT_TRUE(a != b);
+}
+
+TEST(OptionalTest, Less_LeftEmpty) {
+  Optional<int> l;
+  Optional<int> r(1);
+
+  EXPECT_TRUE(l < r);
+}
+
+TEST(OptionalTest, Less_RightEmpty) {
+  Optional<int> l(1);
+  Optional<int> r;
+
+  EXPECT_FALSE(l < r);
+}
+
+TEST(OptionalTest, Less_BothEmpty) {
+  Optional<int> l;
+  Optional<int> r;
+
+  EXPECT_FALSE(l < r);
+}
+
+TEST(OptionalTest, Less_BothValues) {
+  {
+    Optional<int> l(1);
+    Optional<int> r(2);
+
+    EXPECT_TRUE(l < r);
+  }
+  {
+    Optional<int> l(2);
+    Optional<int> r(1);
+
+    EXPECT_FALSE(l < r);
+  }
+  {
+    Optional<int> l(1);
+    Optional<int> r(1);
+
+    EXPECT_FALSE(l < r);
+  }
+}
+
+TEST(OptionalTest, LessEq_LeftEmpty) {
+  Optional<int> l;
+  Optional<int> r(1);
+
+  EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_RightEmpty) {
+  Optional<int> l(1);
+  Optional<int> r;
+
+  EXPECT_FALSE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_BothEmpty) {
+  Optional<int> l;
+  Optional<int> r;
+
+  EXPECT_TRUE(l <= r);
+}
+
+TEST(OptionalTest, LessEq_BothValues) {
+  {
+    Optional<int> l(1);
+    Optional<int> r(2);
+
+    EXPECT_TRUE(l <= r);
+  }
+  {
+    Optional<int> l(2);
+    Optional<int> r(1);
+
+    EXPECT_FALSE(l <= r);
+  }
+  {
+    Optional<int> l(1);
+    Optional<int> r(1);
+
+    EXPECT_TRUE(l <= r);
+  }
+}
+
+TEST(OptionalTest, Greater_BothEmpty) {
+  Optional<int> l;
+  Optional<int> r;
+
+  EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, Greater_LeftEmpty) {
+  Optional<int> l;
+  Optional<int> r(1);
+
+  EXPECT_FALSE(l > r);
+}
+
+TEST(OptionalTest, Greater_RightEmpty) {
+  Optional<int> l(1);
+  Optional<int> r;
+
+  EXPECT_TRUE(l > r);
+}
+
+TEST(OptionalTest, Greater_BothValue) {
+  {
+    Optional<int> l(1);
+    Optional<int> r(2);
+
+    EXPECT_FALSE(l > r);
+  }
+  {
+    Optional<int> l(2);
+    Optional<int> r(1);
+
+    EXPECT_TRUE(l > r);
+  }
+  {
+    Optional<int> l(1);
+    Optional<int> r(1);
+
+    EXPECT_FALSE(l > r);
+  }
+}
+
+TEST(OptionalTest, GreaterEq_BothEmpty) {
+  Optional<int> l;
+  Optional<int> r;
+
+  EXPECT_TRUE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_LeftEmpty) {
+  Optional<int> l;
+  Optional<int> r(1);
+
+  EXPECT_FALSE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_RightEmpty) {
+  Optional<int> l(1);
+  Optional<int> r;
+
+  EXPECT_TRUE(l >= r);
+}
+
+TEST(OptionalTest, GreaterEq_BothValue) {
+  {
+    Optional<int> l(1);
+    Optional<int> r(2);
+
+    EXPECT_FALSE(l >= r);
+  }
+  {
+    Optional<int> l(2);
+    Optional<int> r(1);
+
+    EXPECT_TRUE(l >= r);
+  }
+  {
+    Optional<int> l(1);
+    Optional<int> r(1);
+
+    EXPECT_TRUE(l >= r);
+  }
+}
+
+TEST(OptionalTest, OptNullEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(opt == base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt == base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(base::nullopt == opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(base::nullopt == opt);
+  }
+}
+
+TEST(OptionalTest, OptNullNotEq) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(opt != base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt != base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptNotEq) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(base::nullopt != opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(base::nullopt != opt);
+  }
+}
+
+TEST(OptionalTest, OptNullLower) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(opt < base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt < base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptLower) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(base::nullopt < opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(base::nullopt < opt);
+  }
+}
+
+TEST(OptionalTest, OptNullLowerEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(opt <= base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt <= base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptLowerEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(base::nullopt <= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(base::nullopt <= opt);
+  }
+}
+
+TEST(OptionalTest, OptNullGreater) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(opt > base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt > base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptGreater) {
+  {
+    Optional<int> opt;
+    EXPECT_FALSE(base::nullopt > opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(base::nullopt > opt);
+  }
+}
+
+TEST(OptionalTest, OptNullGreaterEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(opt >= base::nullopt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt >= base::nullopt);
+  }
+}
+
+TEST(OptionalTest, NullOptGreaterEq) {
+  {
+    Optional<int> opt;
+    EXPECT_TRUE(base::nullopt >= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(base::nullopt >= opt);
+  }
+}
+
+TEST(OptionalTest, ValueEq_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(opt == 1);
+}
+
+TEST(OptionalTest, ValueEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(opt == 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt == 1);
+  }
+}
+
+TEST(OptionalTest, EqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(1 == opt);
+}
+
+TEST(OptionalTest, EqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(1 == opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(1 == opt);
+  }
+}
+
+TEST(OptionalTest, ValueNotEq_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(opt != 1);
+}
+
+TEST(OptionalTest, ValueNotEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(opt != 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt != 1);
+  }
+}
+
+TEST(OptionalTest, NotEqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(1 != opt);
+}
+
+TEST(OptionalTest, NotEqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(1 != opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(1 != opt);
+  }
+}
+
+TEST(OptionalTest, ValueLess_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(opt < 1);
+}
+
+TEST(OptionalTest, ValueLess_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(opt < 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt < 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(opt < 1);
+  }
+}
+
+TEST(OptionalTest, LessValue_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(1 < opt);
+}
+
+TEST(OptionalTest, LessValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(1 < opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(1 < opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(1 < opt);
+  }
+}
+
+TEST(OptionalTest, ValueLessEq_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(opt <= 1);
+}
+
+TEST(OptionalTest, ValueLessEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(opt <= 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt <= 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(opt <= 1);
+  }
+}
+
+TEST(OptionalTest, LessEqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(1 <= opt);
+}
+
+TEST(OptionalTest, LessEqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(1 <= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(1 <= opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(1 <= opt);
+  }
+}
+
+TEST(OptionalTest, ValueGreater_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(opt > 1);
+}
+
+TEST(OptionalTest, ValueGreater_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(opt > 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(opt > 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(opt > 1);
+  }
+}
+
+TEST(OptionalTest, GreaterValue_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(1 > opt);
+}
+
+TEST(OptionalTest, GreaterValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(1 > opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_FALSE(1 > opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(1 > opt);
+  }
+}
+
+TEST(OptionalTest, ValueGreaterEq_Empty) {
+  Optional<int> opt;
+  EXPECT_FALSE(opt >= 1);
+}
+
+TEST(OptionalTest, ValueGreaterEq_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_FALSE(opt >= 1);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(opt >= 1);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_TRUE(opt >= 1);
+  }
+}
+
+TEST(OptionalTest, GreaterEqValue_Empty) {
+  Optional<int> opt;
+  EXPECT_TRUE(1 >= opt);
+}
+
+TEST(OptionalTest, GreaterEqValue_NotEmpty) {
+  {
+    Optional<int> opt(0);
+    EXPECT_TRUE(1 >= opt);
+  }
+  {
+    Optional<int> opt(1);
+    EXPECT_TRUE(1 >= opt);
+  }
+  {
+    Optional<int> opt(2);
+    EXPECT_FALSE(1 >= opt);
+  }
+}
+
+TEST(OptionalTest, NotEquals) {
+  {
+    Optional<float> a(0.1f);
+    Optional<float> b(0.2f);
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    Optional<std::string> b("bar");
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    Optional<TestObject> b(TestObject(4, 1.0));
+    EXPECT_TRUE(a != b);
+  }
+}
+
+TEST(OptionalTest, NotEqualsNull) {
+  {
+    Optional<float> a(0.1f);
+    Optional<float> b(0.1f);
+    b = base::nullopt;
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<std::string> a("foo");
+    Optional<std::string> b("foo");
+    b = base::nullopt;
+    EXPECT_NE(a, b);
+  }
+
+  {
+    Optional<TestObject> a(TestObject(3, 0.1));
+    Optional<TestObject> b(TestObject(3, 0.1));
+    b = base::nullopt;
+    EXPECT_TRUE(a != b);
+  }
+}
+
+TEST(OptionalTest, MakeOptional) {
+  {
+    Optional<float> o = base::make_optional(32.f);
+    EXPECT_TRUE(o);
+    EXPECT_EQ(32.f, *o);
+
+    float value = 3.f;
+    o = base::make_optional(std::move(value));
+    EXPECT_TRUE(o);
+    EXPECT_EQ(3.f, *o);
+  }
+
+  {
+    Optional<std::string> o = base::make_optional(std::string("foo"));
+    EXPECT_TRUE(o);
+    EXPECT_EQ("foo", *o);
+
+    std::string value = "bar";
+    o = base::make_optional(std::move(value));
+    EXPECT_TRUE(o);
+    EXPECT_EQ(std::string("bar"), *o);
+  }
+
+  {
+    Optional<TestObject> o = base::make_optional(TestObject(3, 0.1));
+    EXPECT_TRUE(!!o);
+    EXPECT_TRUE(TestObject(3, 0.1) == *o);
+
+    TestObject value = TestObject(0, 0.42);
+    o = base::make_optional(std::move(value));
+    EXPECT_TRUE(!!o);
+    EXPECT_TRUE(TestObject(0, 0.42) == *o);
+    EXPECT_EQ(TestObject::State::MOVED_FROM, value.state());
+    EXPECT_EQ(TestObject::State::MOVE_ASSIGNED, o->state());
+
+    EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED,
+              base::make_optional(std::move(value))->state());
+  }
+}
+
+TEST(OptionalTest, NonMemberSwap_bothNoValue) {
+  Optional<TestObject> a, b;
+  base::swap(a, b);
+
+  EXPECT_FALSE(!!a);
+  EXPECT_FALSE(!!b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_inHasValue) {
+  Optional<TestObject> a(TestObject(1, 0.3));
+  Optional<TestObject> b;
+  base::swap(a, b);
+
+  EXPECT_FALSE(!!a);
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(42, 0.42) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(1, 0.3) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_outHasValue) {
+  Optional<TestObject> a;
+  Optional<TestObject> b(TestObject(1, 0.3));
+  base::swap(a, b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_FALSE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(42, 0.42) == b.value_or(TestObject(42, 0.42)));
+}
+
+TEST(OptionalTest, NonMemberSwap_bothValue) {
+  Optional<TestObject> a(TestObject(0, 0.1));
+  Optional<TestObject> b(TestObject(1, 0.3));
+  base::swap(a, b);
+
+  EXPECT_TRUE(!!a);
+  EXPECT_TRUE(!!b);
+  EXPECT_TRUE(TestObject(1, 0.3) == a.value_or(TestObject(42, 0.42)));
+  EXPECT_TRUE(TestObject(0, 0.1) == b.value_or(TestObject(42, 0.42)));
+  EXPECT_EQ(TestObject::State::SWAPPED, a->state());
+  EXPECT_EQ(TestObject::State::SWAPPED, b->state());
+}
+
+TEST(OptionalTest, Hash_OptionalReflectsInternal) {
+  {
+    std::hash<int> int_hash;
+    std::hash<Optional<int>> opt_int_hash;
+
+    EXPECT_EQ(int_hash(1), opt_int_hash(Optional<int>(1)));
+  }
+
+  {
+    std::hash<std::string> str_hash;
+    std::hash<Optional<std::string>> opt_str_hash;
+
+    EXPECT_EQ(str_hash(std::string("foobar")),
+              opt_str_hash(Optional<std::string>(std::string("foobar"))));
+  }
+}
+
+TEST(OptionalTest, Hash_NullOptEqualsNullOpt) {
+  std::hash<Optional<int>> opt_int_hash;
+  std::hash<Optional<std::string>> opt_str_hash;
+
+  EXPECT_EQ(opt_str_hash(Optional<std::string>()),
+            opt_int_hash(Optional<int>()));
+}
+
+TEST(OptionalTest, Hash_UseInSet) {
+  std::set<Optional<int>> setOptInt;
+
+  EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
+
+  setOptInt.insert(Optional<int>(3));
+  EXPECT_EQ(setOptInt.end(), setOptInt.find(42));
+  EXPECT_NE(setOptInt.end(), setOptInt.find(3));
+}
+
+}  // namespace base
diff --git a/base/os_compat_android.cc b/base/os_compat_android.cc
new file mode 100644
index 0000000..1eb6536
--- /dev/null
+++ b/base/os_compat_android.cc
@@ -0,0 +1,177 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_android.h"
+
+#include <asm/unistd.h>
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+
+#if !defined(__LP64__)
+#include <time64.h>
+#endif
+
+#include "base/rand_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+
+extern "C" {
+// There is no futimes() available in Bionic, so we provide our own
+// implementation until it is there.
+int futimes(int fd, const struct timeval tv[2]) {
+  if (tv == NULL)
+    return syscall(__NR_utimensat, fd, NULL, NULL, 0);
+
+  if (tv[0].tv_usec < 0 || tv[0].tv_usec >= 1000000 ||
+      tv[1].tv_usec < 0 || tv[1].tv_usec >= 1000000) {
+    errno = EINVAL;
+    return -1;
+  }
+
+  // Convert timeval to timespec.
+  struct timespec ts[2];
+  ts[0].tv_sec = tv[0].tv_sec;
+  ts[0].tv_nsec = tv[0].tv_usec * 1000;
+  ts[1].tv_sec = tv[1].tv_sec;
+  ts[1].tv_nsec = tv[1].tv_usec * 1000;
+  return syscall(__NR_utimensat, fd, NULL, ts, 0);
+}
+
+#if !defined(__LP64__)
+// 32-bit Android has only timegm64() and not timegm().
+// We replicate the behaviour of timegm() when the result overflows time_t.
+time_t timegm(struct tm* const t) {
+  // time_t is signed on Android.
+  static const time_t kTimeMax = ~(1L << (sizeof(time_t) * CHAR_BIT - 1));
+  static const time_t kTimeMin = (1L << (sizeof(time_t) * CHAR_BIT - 1));
+  time64_t result = timegm64(t);
+  if (result < kTimeMin || result > kTimeMax)
+    return -1;
+  return result;
+}
+#endif
+
+// The following is only needed when building with GCC 4.6 or higher
+// (i.e. not with Android GCC 4.4.3, nor with Clang).
+//
+// GCC is now capable of optimizing successive calls to sin() and cos() into
+// a single call to sincos(). This means that source code that looks like:
+//
+//     double c, s;
+//     c = cos(angle);
+//     s = sin(angle);
+//
+// Will generate machine code that looks like:
+//
+//     double c, s;
+//     sincos(angle, &s, &c);
+//
+// Unfortunately, sincos() and friends are not part of the Android libm.so
+// library provided by the NDK for API level 9. When the optimization kicks
+// in, it makes the final build fail with a puzzling message (puzzling
+// because 'sincos' doesn't appear anywhere in the sources!).
+//
+// To solve this, we provide our own implementation of the sincos() function
+// and related friends. Note that we must also explicitly tell GCC to disable
+// optimizations when generating these. Otherwise, the generated machine code
+// for each function would simply end up calling itself, resulting in a
+// runtime crash due to stack overflow.
+//
+#if defined(__GNUC__) && !defined(__clang__) && \
+    !defined(ANDROID_SINCOS_PROVIDED)
+
+// For the record, Clang does not support the 'optimize' attribute.
+// In the unlikely event that it begins performing this optimization too,
+// we'll have to find a different way to achieve this. NOTE: Tested with O1
+// which still performs the optimization.
+//
+#define GCC_NO_OPTIMIZE  __attribute__((optimize("O0")))
+
+GCC_NO_OPTIMIZE
+void sincos(double angle, double* s, double *c) {
+  *c = cos(angle);
+  *s = sin(angle);
+}
+
+GCC_NO_OPTIMIZE
+void sincosf(float angle, float* s, float* c) {
+  *c = cosf(angle);
+  *s = sinf(angle);
+}
+
+#endif // __GNUC__ && !__clang__
+
+// An implementation of mkdtemp, since it is not exposed by the NDK
+// for native API level 9 that we target.
+//
+// For any changes in the mkdtemp function, you should manually run the unittest
+// OsCompatAndroidTest.DISABLED_TestMkdTemp in your local machine to check if it
+// passes. Please don't enable it, since it creates a directory and may be
+// a source of flakiness.
+char* mkdtemp(char* path) {
+  if (path == NULL) {
+    errno = EINVAL;
+    return NULL;
+  }
+
+  const int path_len = strlen(path);
+
+  // The last six characters of 'path' must be XXXXXX.
+  const base::StringPiece kSuffix("XXXXXX");
+  const int kSuffixLen = kSuffix.length();
+  if (!base::StringPiece(path, path_len).ends_with(kSuffix)) {
+    errno = EINVAL;
+    return NULL;
+  }
+
+  // If the path contains a directory, as in /tmp/foo/XXXXXXXX, make sure
+  // that /tmp/foo exists, otherwise we're going to loop a really long
+  // time for nothing below
+  char* dirsep = strrchr(path, '/');
+  if (dirsep != NULL) {
+    struct stat st;
+    int ret;
+
+    *dirsep = '\0';  // Terminating directory path temporarily
+
+    ret = stat(path, &st);
+
+    *dirsep = '/';  // Restoring directory separator
+    if (ret < 0)  // Directory probably does not exist
+      return NULL;
+    if (!S_ISDIR(st.st_mode)) {  // Not a directory
+      errno = ENOTDIR;
+      return NULL;
+    }
+  }
+
+  // Max number of tries using different random suffixes.
+  const int kMaxTries = 100;
+
+  // Now loop until we CAN create a directory by that name or we reach the max
+  // number of tries.
+  for (int i = 0; i < kMaxTries; ++i) {
+    // Fill the suffix XXXXXX with a random string composed of a-z chars.
+    for (int pos = 0; pos < kSuffixLen; ++pos) {
+      char rand_char = static_cast<char>(base::RandInt('a', 'z'));
+      path[path_len - kSuffixLen + pos] = rand_char;
+    }
+    if (mkdir(path, 0700) == 0) {
+      // We just created the directory successfully.
+      return path;
+    }
+    if (errno != EEXIST) {
+      // mkdir() failed with an error other than EEXIST; give up.
+      return NULL;
+    }
+  }
+
+  // We reached the max number of tries.
+  return NULL;
+}
+
+}  // extern "C"
diff --git a/base/os_compat_android.h b/base/os_compat_android.h
new file mode 100644
index 0000000..0f25444
--- /dev/null
+++ b/base/os_compat_android.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OS_COMPAT_ANDROID_H_
+#define BASE_OS_COMPAT_ANDROID_H_
+
+#include <fcntl.h>
+#include <sys/types.h>
+#include <utime.h>
+
+// Not implemented in Bionic.
+extern "C" int futimes(int fd, const struct timeval tv[2]);
+
+// Not exposed or implemented in Bionic.
+extern "C" char* mkdtemp(char* path);
+
+// Android has no timegm().
+extern "C" time_t timegm(struct tm* const t);
+
+// The lockf() function is not available on Android; we translate to flock().
+#define F_LOCK LOCK_EX
+#define F_ULOCK LOCK_UN
+inline int lockf(int fd, int cmd, off_t ignored_len) {
+  return flock(fd, cmd);
+}
+
+#endif  // BASE_OS_COMPAT_ANDROID_H_
diff --git a/base/os_compat_android_unittest.cc b/base/os_compat_android_unittest.cc
new file mode 100644
index 0000000..7fbdc6d
--- /dev/null
+++ b/base/os_compat_android_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_android.h"
+
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+typedef testing::Test OsCompatAndroidTest;
+
+// Keep this Unittest DISABLED_ , because it actually creates a directory in the
+// device and it may be a source of flakiness. For any changes in the mkdtemp
+// function, you should run this unittest in your local machine to check if it
+// passes.
+TEST_F(OsCompatAndroidTest, DISABLED_TestMkdTemp) {
+  FilePath tmp_dir;
+  EXPECT_TRUE(base::GetTempDir(&tmp_dir));
+
+  // Not six XXXXXX at the suffix of the path.
+  FilePath sub_dir = tmp_dir.Append("XX");
+  std::string sub_dir_string = sub_dir.value();
+  // this should be OK since mkdtemp just replaces characters in place
+  char* buffer = const_cast<char*>(sub_dir_string.c_str());
+  EXPECT_EQ(NULL, mkdtemp(buffer));
+
+  // Directory does not exist
+  char invalid_path2[] = "doesntoexist/foobarXXXXXX";
+  EXPECT_EQ(NULL, mkdtemp(invalid_path2));
+
+  // Successfully create a tmp dir.
+  FilePath sub_dir2 = tmp_dir.Append("XXXXXX");
+  std::string sub_dir2_string = sub_dir2.value();
+  // this should be OK since mkdtemp just replaces characters in place
+  char* buffer2 = const_cast<char*>(sub_dir2_string.c_str());
+  EXPECT_TRUE(mkdtemp(buffer2) != NULL);
+}
+
+}  // namespace base
diff --git a/base/pending_task.cc b/base/pending_task.cc
index 3d78914..73834bd 100644
--- a/base/pending_task.cc
+++ b/base/pending_task.cc
@@ -9,9 +9,9 @@
 namespace base {
 
 PendingTask::PendingTask(const tracked_objects::Location& posted_from,
-                         const base::Closure& task)
+                         base::Closure task)
     : base::TrackingInfo(posted_from, TimeTicks()),
-      task(task),
+      task(std::move(task)),
       posted_from(posted_from),
       sequence_num(0),
       nestable(true),
@@ -19,20 +19,24 @@
 }
 
 PendingTask::PendingTask(const tracked_objects::Location& posted_from,
-                         const base::Closure& task,
+                         base::Closure task,
                          TimeTicks delayed_run_time,
                          bool nestable)
     : base::TrackingInfo(posted_from, delayed_run_time),
-      task(task),
+      task(std::move(task)),
       posted_from(posted_from),
       sequence_num(0),
       nestable(nestable),
       is_high_res(false) {
 }
 
+PendingTask::PendingTask(PendingTask&& other) = default;
+
 PendingTask::~PendingTask() {
 }
 
+PendingTask& PendingTask::operator=(PendingTask&& other) = default;
+
 bool PendingTask::operator<(const PendingTask& other) const {
   // Since the top of a priority queue is defined as the "greatest" element, we
   // need to invert the comparison here.  We want the smaller time to be at the
@@ -49,8 +53,4 @@
   return (sequence_num - other.sequence_num) > 0;
 }
 
-void TaskQueue::Swap(TaskQueue* queue) {
-  c.swap(queue->c);  // Calls std::deque::swap.
-}
-
 }  // namespace base
diff --git a/base/pending_task.h b/base/pending_task.h
index fddfc86..5761653 100644
--- a/base/pending_task.h
+++ b/base/pending_task.h
@@ -19,13 +19,16 @@
 // for use by classes that queue and execute tasks.
 struct BASE_EXPORT PendingTask : public TrackingInfo {
   PendingTask(const tracked_objects::Location& posted_from,
-              const Closure& task);
+              Closure task);
   PendingTask(const tracked_objects::Location& posted_from,
-              const Closure& task,
+              Closure task,
               TimeTicks delayed_run_time,
               bool nestable);
+  PendingTask(PendingTask&& other);
   ~PendingTask();
 
+  PendingTask& operator=(PendingTask&& other);
+
   // Used to support sorting.
   bool operator<(const PendingTask& other) const;
 
@@ -45,15 +48,10 @@
   bool is_high_res;
 };
 
-// Wrapper around std::queue specialized for PendingTask which adds a Swap
-// helper method.
-class BASE_EXPORT TaskQueue : public std::queue<PendingTask> {
- public:
-  void Swap(TaskQueue* queue);
-};
+using TaskQueue = std::queue<PendingTask>;
 
 // PendingTasks are sorted by their |delayed_run_time| property.
-typedef std::priority_queue<base::PendingTask> DelayedTaskQueue;
+using DelayedTaskQueue = std::priority_queue<base::PendingTask>;
 
 }  // namespace base
 
diff --git a/base/pickle.cc b/base/pickle.cc
index d83391b..4ef167b 100644
--- a/base/pickle.cc
+++ b/base/pickle.cc
@@ -11,6 +11,7 @@
 
 #include "base/bits.h"
 #include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -89,7 +90,15 @@
 }
 
 bool PickleIterator::ReadLong(long* result) {
-  return ReadBuiltinType(result);
+  // Always read long as a 64-bit value to ensure compatibility between 32-bit
+  // and 64-bit processes.
+  int64_t result_int64 = 0;
+  if (!ReadBuiltinType(&result_int64))
+    return false;
+  // CHECK if the cast truncates the value so that we know to change this IPC
+  // parameter to use int64_t.
+  *result = base::checked_cast<long>(result_int64);
+  return true;
 }
 
 bool PickleIterator::ReadUInt16(uint16_t* result) {
@@ -108,16 +117,6 @@
   return ReadBuiltinType(result);
 }
 
-bool PickleIterator::ReadSizeT(size_t* result) {
-  // Always read size_t as a 64-bit value to ensure compatibility between 32-bit
-  // and 64-bit processes.
-  uint64_t result_uint64 = 0;
-  bool success = ReadBuiltinType(&result_uint64);
-  *result = static_cast<size_t>(result_uint64);
-  // Fail if the cast above truncates the value.
-  return success && (*result == result_uint64);
-}
-
 bool PickleIterator::ReadFloat(float* result) {
   // crbug.com/315213
   // The source data may not be properly aligned, and unaligned float reads
@@ -208,6 +207,49 @@
   return true;
 }
 
+PickleSizer::PickleSizer() {}
+
+PickleSizer::~PickleSizer() {}
+
+void PickleSizer::AddString(const StringPiece& value) {
+  AddInt();
+  AddBytes(static_cast<int>(value.size()));
+}
+
+void PickleSizer::AddString16(const StringPiece16& value) {
+  AddInt();
+  AddBytes(static_cast<int>(value.size() * sizeof(char16)));
+}
+
+void PickleSizer::AddData(int length) {
+  CHECK_GE(length, 0);
+  AddInt();
+  AddBytes(length);
+}
+
+void PickleSizer::AddBytes(int length) {
+  payload_size_ += bits::Align(length, sizeof(uint32_t));
+}
+
+void PickleSizer::AddAttachment() {
+  // From IPC::Message::WriteAttachment
+  AddBool();
+  AddInt();
+}
+
+template <size_t length> void PickleSizer::AddBytesStatic() {
+  DCHECK_LE(length, static_cast<size_t>(std::numeric_limits<int>::max()));
+  AddBytes(length);
+}
+
+template void PickleSizer::AddBytesStatic<2>();
+template void PickleSizer::AddBytesStatic<4>();
+template void PickleSizer::AddBytesStatic<8>();
+
+Pickle::Attachment::Attachment() {}
+
+Pickle::Attachment::~Attachment() {}
+
 // Payload is uint32_t aligned.
 
 Pickle::Pickle()
@@ -322,6 +364,19 @@
     Resize(capacity_after_header_ * 2 + new_size);
 }
 
+bool Pickle::WriteAttachment(scoped_refptr<Attachment> /*attachment*/) {
+  return false;
+}
+
+bool Pickle::ReadAttachment(base::PickleIterator* /*iter*/,
+                            scoped_refptr<Attachment>* /*attachment*/) const {
+  return false;
+}
+
+bool Pickle::HasAttachments() const {
+  return false;
+}
+
 void Pickle::Resize(size_t new_capacity) {
   CHECK_NE(capacity_after_header_, kCapacityReadOnly);
   capacity_after_header_ = bits::Align(new_capacity, kPayloadUnit);
diff --git a/base/pickle.h b/base/pickle.h
index 02bc432..40f5d26 100644
--- a/base/pickle.h
+++ b/base/pickle.h
@@ -14,9 +14,14 @@
 #include "base/compiler_specific.h"
 #include "base/gtest_prod_util.h"
 #include "base/logging.h"
+#include "base/memory/ref_counted.h"
 #include "base/strings/string16.h"
 #include "base/strings/string_piece.h"
 
+#if defined(OS_POSIX)
+#include "base/files/file.h"
+#endif
+
 namespace base {
 
 class Pickle;
@@ -40,7 +45,6 @@
   bool ReadUInt32(uint32_t* result) WARN_UNUSED_RESULT;
   bool ReadInt64(int64_t* result) WARN_UNUSED_RESULT;
   bool ReadUInt64(uint64_t* result) WARN_UNUSED_RESULT;
-  bool ReadSizeT(size_t* result) WARN_UNUSED_RESULT;
   bool ReadFloat(float* result) WARN_UNUSED_RESULT;
   bool ReadDouble(double* result) WARN_UNUSED_RESULT;
   bool ReadString(std::string* result) WARN_UNUSED_RESULT;
@@ -104,6 +108,42 @@
   FRIEND_TEST_ALL_PREFIXES(PickleTest, GetReadPointerAndAdvance);
 };
 
+// This class provides an interface analogous to base::Pickle's WriteFoo()
+// methods and can be used to accurately compute the size of a hypothetical
+// Pickle's payload without having to reference the Pickle implementation.
+class BASE_EXPORT PickleSizer {
+ public:
+  PickleSizer();
+  ~PickleSizer();
+
+  // Returns the computed size of the payload.
+  size_t payload_size() const { return payload_size_; }
+
+  void AddBool() { return AddInt(); }
+  void AddInt() { AddPOD<int>(); }
+  void AddLong() { AddPOD<uint64_t>(); }
+  void AddUInt16() { return AddPOD<uint16_t>(); }
+  void AddUInt32() { return AddPOD<uint32_t>(); }
+  void AddInt64() { return AddPOD<int64_t>(); }
+  void AddUInt64() { return AddPOD<uint64_t>(); }
+  void AddFloat() { return AddPOD<float>(); }
+  void AddDouble() { return AddPOD<double>(); }
+  void AddString(const StringPiece& value);
+  void AddString16(const StringPiece16& value);
+  void AddData(int length);
+  void AddBytes(int length);
+  void AddAttachment();
+
+ private:
+  // Just like AddBytes() but with a compile-time size for performance.
+  template<size_t length> void BASE_EXPORT AddBytesStatic();
+
+  template <typename T>
+  void AddPOD() { AddBytesStatic<sizeof(T)>(); }
+
+  size_t payload_size_ = 0;
+};
+
 // This class provides facilities for basic binary value packing and unpacking.
 //
 // The Pickle class supports appending primitive values (ints, strings, etc.)
@@ -123,6 +163,21 @@
 //
 class BASE_EXPORT Pickle {
  public:
+  // Auxiliary data attached to a Pickle. Pickle must be subclassed along with
+  // this interface in order to provide a concrete implementation of support
+  // for attachments. The base Pickle implementation does not accept
+  // attachments.
+  class BASE_EXPORT Attachment : public RefCountedThreadSafe<Attachment> {
+   public:
+    Attachment();
+
+   protected:
+    friend class RefCountedThreadSafe<Attachment>;
+    virtual ~Attachment();
+
+    DISALLOW_COPY_AND_ASSIGN(Attachment);
+  };
+
   // Initialize a Pickle object using the default header size.
   Pickle();
 
@@ -173,23 +228,15 @@
   bool WriteInt(int value) {
     return WritePOD(value);
   }
-  // WARNING: DO NOT USE THIS METHOD IF PICKLES ARE PERSISTED IN ANY WAY.
-  // It will write whatever a "long" is on this architecture. On 32-bit
-  // platforms, it is 32 bits. On 64-bit platforms, it is 64 bits. If persisted
-  // pickles are still around after upgrading to 64-bit, or if they are copied
-  // between dissimilar systems, YOUR PICKLES WILL HAVE GONE BAD.
-  bool WriteLongUsingDangerousNonPortableLessPersistableForm(long value) {
-    return WritePOD(value);
+  bool WriteLong(long value) {
+    // Always write long as a 64-bit value to ensure compatibility between
+    // 32-bit and 64-bit processes.
+    return WritePOD(static_cast<int64_t>(value));
   }
   bool WriteUInt16(uint16_t value) { return WritePOD(value); }
   bool WriteUInt32(uint32_t value) { return WritePOD(value); }
   bool WriteInt64(int64_t value) { return WritePOD(value); }
   bool WriteUInt64(uint64_t value) { return WritePOD(value); }
-  bool WriteSizeT(size_t value) {
-    // Always write size_t as a 64-bit value to ensure compatibility between
-    // 32-bit and 64-bit processes.
-    return WritePOD(static_cast<uint64_t>(value));
-  }
   bool WriteFloat(float value) {
     return WritePOD(value);
   }
@@ -206,6 +253,19 @@
   // known size. See also WriteData.
   bool WriteBytes(const void* data, int length);
 
+  // WriteAttachment appends |attachment| to the pickle. It returns
+  // false iff the set is full or if the Pickle implementation does not support
+  // attachments.
+  virtual bool WriteAttachment(scoped_refptr<Attachment> attachment);
+
+  // ReadAttachment parses an attachment given the parsing state |iter| and
+  // writes it to |*attachment|. It returns true on success.
+  virtual bool ReadAttachment(base::PickleIterator* iter,
+                              scoped_refptr<Attachment>* attachment) const;
+
+  // Indicates whether the pickle has any attachments.
+  virtual bool HasAttachments() const;
+
   // Reserves space for upcoming writes when multiple writes will be made and
   // their sizes are computed in advance. It can be significantly faster to call
   // Reserve() before calling WriteFoo() multiple times.
diff --git a/base/pickle_unittest.cc b/base/pickle_unittest.cc
index b195a81..e00edd9 100644
--- a/base/pickle_unittest.cc
+++ b/base/pickle_unittest.cc
@@ -2,15 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/pickle.h"
+
 #include <limits.h>
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/pickle.h"
 #include "base/strings/string16.h"
 #include "base/strings/utf_string_conversions.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -27,7 +28,6 @@
 const uint32_t testuint32 = 1593847192;
 const int64_t testint64 = -0x7E8CA9253104BDFCLL;
 const uint64_t testuint64 = 0xCE8CA9253104BDF7ULL;
-const size_t testsizet = 0xFEDC7654;
 const float testfloat = 3.1415926935f;
 const double testdouble = 2.71828182845904523;
 const std::string teststring("Hello world");  // note non-aligned string length
@@ -73,10 +73,6 @@
   EXPECT_TRUE(iter.ReadUInt64(&outuint64));
   EXPECT_EQ(testuint64, outuint64);
 
-  size_t outsizet;
-  EXPECT_TRUE(iter.ReadSizeT(&outsizet));
-  EXPECT_EQ(testsizet, outsizet);
-
   float outfloat;
   EXPECT_TRUE(iter.ReadFloat(&outfloat));
   EXPECT_EQ(testfloat, outfloat);
@@ -119,13 +115,11 @@
   EXPECT_TRUE(pickle.WriteBool(testbool1));
   EXPECT_TRUE(pickle.WriteBool(testbool2));
   EXPECT_TRUE(pickle.WriteInt(testint));
-  EXPECT_TRUE(
-      pickle.WriteLongUsingDangerousNonPortableLessPersistableForm(testlong));
+  EXPECT_TRUE(pickle.WriteLong(testlong));
   EXPECT_TRUE(pickle.WriteUInt16(testuint16));
   EXPECT_TRUE(pickle.WriteUInt32(testuint32));
   EXPECT_TRUE(pickle.WriteInt64(testint64));
   EXPECT_TRUE(pickle.WriteUInt64(testuint64));
-  EXPECT_TRUE(pickle.WriteSizeT(testsizet));
   EXPECT_TRUE(pickle.WriteFloat(testfloat));
   EXPECT_TRUE(pickle.WriteDouble(testdouble));
   EXPECT_TRUE(pickle.WriteString(teststring));
@@ -145,31 +139,32 @@
   VerifyResult(pickle3);
 }
 
-// Tests that reading/writing a size_t works correctly when the source process
+// Tests that reading/writing a long works correctly when the source process
 // is 64-bit.  We rely on having both 32- and 64-bit trybots to validate both
 // arms of the conditional in this test.
-TEST(PickleTest, SizeTFrom64Bit) {
+TEST(PickleTest, LongFrom64Bit) {
   Pickle pickle;
-  // Under the hood size_t is always written as a 64-bit value, so simulate a
-  // 64-bit size_t even on 32-bit architectures by explicitly writing a
-  // uint64_t.
-  EXPECT_TRUE(pickle.WriteUInt64(testuint64));
+  // Under the hood long is always written as a 64-bit value, so simulate a
+  // 64-bit long even on 32-bit architectures by explicitly writing an int64_t.
+  EXPECT_TRUE(pickle.WriteInt64(testint64));
 
   PickleIterator iter(pickle);
-  size_t outsizet;
-  if (sizeof(size_t) < sizeof(uint64_t)) {
-    // ReadSizeT() should return false when the original written value can't be
-    // represented as a size_t.
-    EXPECT_FALSE(iter.ReadSizeT(&outsizet));
+  long outlong;
+  if (sizeof(long) < sizeof(int64_t)) {
+    // ReadLong() should return false when the original written value can't be
+    // represented as a long.
+#if GTEST_HAS_DEATH_TEST
+    EXPECT_DEATH(ignore_result(iter.ReadLong(&outlong)), "");
+#endif
   } else {
-    EXPECT_TRUE(iter.ReadSizeT(&outsizet));
-    EXPECT_EQ(testuint64, outsizet);
+    EXPECT_TRUE(iter.ReadLong(&outlong));
+    EXPECT_EQ(testint64, outlong);
   }
 }
 
 // Tests that we can handle really small buffers.
 TEST(PickleTest, SmallBuffer) {
-  scoped_ptr<char[]> buffer(new char[1]);
+  std::unique_ptr<char[]> buffer(new char[1]);
 
   // We should not touch the buffer.
   Pickle pickle(buffer.get(), 1);
@@ -335,7 +330,7 @@
 
 TEST(PickleTest, FindNextWithIncompleteHeader) {
   size_t header_size = sizeof(Pickle::Header);
-  scoped_ptr<char[]> buffer(new char[header_size - 1]);
+  std::unique_ptr<char[]> buffer(new char[header_size - 1]);
   memset(buffer.get(), 0x1, header_size - 1);
 
   const char* start = buffer.get();
@@ -352,7 +347,7 @@
   size_t header_size = sizeof(Pickle::Header);
   size_t header_size2 = 2 * header_size;
   size_t payload_received = 100;
-  scoped_ptr<char[]> buffer(new char[header_size2 + payload_received]);
+  std::unique_ptr<char[]> buffer(new char[header_size2 + payload_received]);
   const char* start = buffer.get();
   Pickle::Header* header = reinterpret_cast<Pickle::Header*>(buffer.get());
   const char* end = start + header_size2 + payload_received;
@@ -396,7 +391,7 @@
 
 TEST(PickleTest, Resize) {
   size_t unit = Pickle::kPayloadUnit;
-  scoped_ptr<char[]> data(new char[unit]);
+  std::unique_ptr<char[]> data(new char[unit]);
   char* data_ptr = data.get();
   for (size_t i = 0; i < unit; i++)
     data_ptr[i] = 'G';
@@ -556,14 +551,14 @@
   std::string data("Hello, world!");
 
   TestingPickle pickle;
-  pickle.WriteSizeT(data.size());
+  pickle.WriteUInt32(data.size());
   void* bytes = pickle.ClaimBytes(data.size());
   pickle.WriteInt(42);
   memcpy(bytes, data.data(), data.size());
 
   PickleIterator iter(pickle);
-  size_t out_data_length;
-  EXPECT_TRUE(iter.ReadSizeT(&out_data_length));
+  uint32_t out_data_length;
+  EXPECT_TRUE(iter.ReadUInt32(&out_data_length));
   EXPECT_EQ(data.size(), out_data_length);
 
   const char* out_data = nullptr;
@@ -575,4 +570,99 @@
   EXPECT_EQ(42, out_value);
 }
 
+// Checks that PickleSizer and Pickle agree on the size of things.
+TEST(PickleTest, PickleSizer) {
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteBool(true);
+    sizer.AddBool();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteInt(42);
+    sizer.AddInt();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteLong(42);
+    sizer.AddLong();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteUInt16(42);
+    sizer.AddUInt16();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteUInt32(42);
+    sizer.AddUInt32();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteInt64(42);
+    sizer.AddInt64();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteUInt64(42);
+    sizer.AddUInt64();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteFloat(42.0f);
+    sizer.AddFloat();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteDouble(42.0);
+    sizer.AddDouble();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteString(teststring);
+    sizer.AddString(teststring);
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteString16(teststring16);
+    sizer.AddString16(teststring16);
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteData(testdata, testdatalen);
+    sizer.AddData(testdatalen);
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteBytes(testdata, testdatalen);
+    sizer.AddBytes(testdatalen);
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+}
+
 }  // namespace base
diff --git a/base/posix/global_descriptors.h b/base/posix/global_descriptors.h
index c774634..edb299d 100644
--- a/base/posix/global_descriptors.h
+++ b/base/posix/global_descriptors.h
@@ -55,7 +55,14 @@
 #if !defined(OS_ANDROID)
   static const int kBaseDescriptor = 3;  // 0, 1, 2 are already taken.
 #else
-  static const int kBaseDescriptor = 4;  // 3 used by __android_log_write().
+  // 3 used by __android_log_write().
+  // 4 used by... something important on Android M.
+  // 5 used by... something important on Android L... on low-end devices.
+  // TODO(amistry): On Android, this mechanism is only used for tests since the
+  // content child launcher spawns a process by creating a new Activity using
+  // the Android APIs. For tests, come up with a way that doesn't require using
+  // a pre-defined fd.
+  static const int kBaseDescriptor = 6;
 #endif
 
   // Return the singleton instance of GlobalDescriptors.
diff --git a/base/posix/safe_strerror.cc b/base/posix/safe_strerror.cc
index e80e8f8..798658e 100644
--- a/base/posix/safe_strerror.cc
+++ b/base/posix/safe_strerror.cc
@@ -20,7 +20,11 @@
 
 namespace base {
 
-#define USE_HISTORICAL_STRERRO_R (defined(__GLIBC__) || defined(OS_NACL))
+#if defined(__GLIBC__) || defined(OS_NACL)
+#define USE_HISTORICAL_STRERRO_R 1
+#else
+#define USE_HISTORICAL_STRERRO_R 0
+#endif
 
 #if USE_HISTORICAL_STRERRO_R && defined(__GNUC__)
 // GCC will complain about the unused second wrap function unless we tell it
diff --git a/base/posix/unix_domain_socket_linux_unittest.cc b/base/posix/unix_domain_socket_linux_unittest.cc
index e4b63c0..3f5173cf 100644
--- a/base/posix/unix_domain_socket_linux_unittest.cc
+++ b/base/posix/unix_domain_socket_linux_unittest.cc
@@ -52,7 +52,8 @@
   message_fds.clear();
 
   // Check that the thread didn't get blocked.
-  WaitableEvent event(false, false);
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
   message_thread.task_runner()->PostTask(
       FROM_HERE, Bind(&WaitableEvent::Signal, Unretained(&event)));
   ASSERT_TRUE(event.TimedWait(TimeDelta::FromMilliseconds(5000)));
diff --git a/base/power_monitor/power_monitor.h b/base/power_monitor/power_monitor.h
index 683eeb9..e025b32 100644
--- a/base/power_monitor/power_monitor.h
+++ b/base/power_monitor/power_monitor.h
@@ -20,7 +20,7 @@
 class BASE_EXPORT PowerMonitor {
  public:
   // Takes ownership of |source|.
-  explicit PowerMonitor(scoped_ptr<PowerMonitorSource> source);
+  explicit PowerMonitor(std::unique_ptr<PowerMonitorSource> source);
   ~PowerMonitor();
 
   // Get the process-wide PowerMonitor (if not present, returns NULL).
@@ -45,7 +45,7 @@
   void NotifyResume();
 
   scoped_refptr<ObserverListThreadSafe<PowerObserver> > observers_;
-  scoped_ptr<PowerMonitorSource> source_;
+  std::unique_ptr<PowerMonitorSource> source_;
 
   DISALLOW_COPY_AND_ASSIGN(PowerMonitor);
 };
diff --git a/base/prefs/OWNERS b/base/prefs/OWNERS
deleted file mode 100644
index 2d87038..0000000
--- a/base/prefs/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-battre@chromium.org
-bauerb@chromium.org
-gab@chromium.org
-pam@chromium.org
diff --git a/base/prefs/README b/base/prefs/README
deleted file mode 100644
index 52d9c43..0000000
--- a/base/prefs/README
+++ /dev/null
@@ -1,6 +0,0 @@
-Prefs is a general-purpose key-value store for application preferences.
-
-The Prefs code lives in base/prefs but is not part of the
-'base/base.gyp:base' library because of a desire to keep its use
-optional. If you use Prefs, you should add a GYP dependency on
-base/base.gyp:base_prefs.
diff --git a/base/prefs/base_prefs_export.h b/base/prefs/base_prefs_export.h
deleted file mode 100644
index 3d207db..0000000
--- a/base/prefs/base_prefs_export.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_BASE_PREFS_EXPORT_H_
-#define BASE_PREFS_BASE_PREFS_EXPORT_H_
-
-#if defined(COMPONENT_BUILD)
-#if defined(WIN32)
-
-#if defined(BASE_PREFS_IMPLEMENTATION)
-#define BASE_PREFS_EXPORT __declspec(dllexport)
-#else
-#define BASE_PREFS_EXPORT __declspec(dllimport)
-#endif  // defined(BASE_PREFS_IMPLEMENTATION)
-
-#else  // defined(WIN32)
-#if defined(BASE_PREFS_IMPLEMENTATION)
-#define BASE_PREFS_EXPORT __attribute__((visibility("default")))
-#else
-#define BASE_PREFS_EXPORT
-#endif
-#endif
-
-#else  // defined(COMPONENT_BUILD)
-#define BASE_PREFS_EXPORT
-#endif
-
-#endif  // BASE_PREFS_BASE_PREFS_EXPORT_H_
diff --git a/base/prefs/persistent_pref_store.h b/base/prefs/persistent_pref_store.h
deleted file mode 100644
index 89c7a71..0000000
--- a/base/prefs/persistent_pref_store.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_PERSISTENT_PREF_STORE_H_
-#define BASE_PREFS_PERSISTENT_PREF_STORE_H_
-
-#include <string>
-
-#include "base/prefs/base_prefs_export.h"
-#include "base/prefs/writeable_pref_store.h"
-
-// This interface is complementary to the PrefStore interface, declaring
-// additional functionality that adds support for setting values and persisting
-// the data to some backing store.
-class BASE_PREFS_EXPORT PersistentPrefStore : public WriteablePrefStore {
- public:
-  // Unique integer code for each type of error so we can report them
-  // distinctly in a histogram.
-  // NOTE: Don't change the explicit values of the enums as it will change the
-  // server's meaning of the histogram.
-  enum PrefReadError {
-    PREF_READ_ERROR_NONE = 0,
-    PREF_READ_ERROR_JSON_PARSE = 1,
-    PREF_READ_ERROR_JSON_TYPE = 2,
-    PREF_READ_ERROR_ACCESS_DENIED = 3,
-    PREF_READ_ERROR_FILE_OTHER = 4,
-    PREF_READ_ERROR_FILE_LOCKED = 5,
-    PREF_READ_ERROR_NO_FILE = 6,
-    PREF_READ_ERROR_JSON_REPEAT = 7,
-    // PREF_READ_ERROR_OTHER = 8,  // Deprecated.
-    PREF_READ_ERROR_FILE_NOT_SPECIFIED = 9,
-    // Indicates that ReadPrefs() couldn't complete synchronously and is waiting
-    // for an asynchronous task to complete first.
-    PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE = 10,
-    PREF_READ_ERROR_MAX_ENUM
-  };
-
-  class ReadErrorDelegate {
-   public:
-    virtual ~ReadErrorDelegate() {}
-
-    virtual void OnError(PrefReadError error) = 0;
-  };
-
-  // Whether the store is in a pseudo-read-only mode where changes are not
-  // actually persisted to disk.  This happens in some cases when there are
-  // read errors during startup.
-  virtual bool ReadOnly() const = 0;
-
-  // Gets the read error. Only valid if IsInitializationComplete() returns true.
-  virtual PrefReadError GetReadError() const = 0;
-
-  // Reads the preferences from disk. Notifies observers via
-  // "PrefStore::OnInitializationCompleted" when done.
-  virtual PrefReadError ReadPrefs() = 0;
-
-  // Reads the preferences from disk asynchronously. Notifies observers via
-  // "PrefStore::OnInitializationCompleted" when done. Also it fires
-  // |error_delegate| if it is not NULL and reading error has occurred.
-  // Owns |error_delegate|.
-  virtual void ReadPrefsAsync(ReadErrorDelegate* error_delegate) = 0;
-
-  // Lands any pending writes to disk.
-  virtual void CommitPendingWrite() = 0;
-
-  // Schedule a write if there is any lossy data pending. Unlike
-  // CommitPendingWrite() this does not immediately sync to disk, instead it
-  // triggers an eventual write if there is lossy data pending and if there
-  // isn't one scheduled already.
-  virtual void SchedulePendingLossyWrites() = 0;
-
- protected:
-  ~PersistentPrefStore() override {}
-};
-
-#endif  // BASE_PREFS_PERSISTENT_PREF_STORE_H_
diff --git a/base/prefs/pref_filter.h b/base/prefs/pref_filter.h
deleted file mode 100644
index 82a44c6..0000000
--- a/base/prefs/pref_filter.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_PREF_FILTER_H_
-#define BASE_PREFS_PREF_FILTER_H_
-
-#include <string>
-
-#include "base/callback_forward.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/prefs/base_prefs_export.h"
-
-namespace base {
-class DictionaryValue;
-class Value;
-}  // namespace base
-
-// Filters preferences as they are loaded from disk or updated at runtime.
-// Currently supported only by JsonPrefStore.
-class BASE_PREFS_EXPORT PrefFilter {
- public:
-  // A callback to be invoked when |prefs| have been read (and possibly
-  // pre-modified) and are now ready to be handed back to this callback's
-  // builder. |schedule_write| indicates whether a write should be immediately
-  // scheduled (typically because the |prefs| were pre-modified).
-  typedef base::Callback<void(scoped_ptr<base::DictionaryValue> prefs,
-                              bool schedule_write)> PostFilterOnLoadCallback;
-
-  virtual ~PrefFilter() {}
-
-  // This method is given ownership of the |pref_store_contents| read from disk
-  // before the underlying PersistentPrefStore gets to use them. It must hand
-  // them back via |post_filter_on_load_callback|, but may modify them first.
-  // Note: This method is asynchronous, which may make calls like
-  // PersistentPrefStore::ReadPrefs() asynchronous. The owner of filtered
-  // PersistentPrefStores should handle this to make the reads look synchronous
-  // to external users (see SegregatedPrefStore::ReadPrefs() for an example).
-  virtual void FilterOnLoad(
-      const PostFilterOnLoadCallback& post_filter_on_load_callback,
-      scoped_ptr<base::DictionaryValue> pref_store_contents) = 0;
-
-  // Receives notification when a pref store value is changed, before Observers
-  // are notified.
-  virtual void FilterUpdate(const std::string& path) = 0;
-
-  // Receives notification when the pref store is about to serialize data
-  // contained in |pref_store_contents| to a string. Modifications to
-  // |pref_store_contents| will be persisted to disk and also affect the
-  // in-memory state.
-  virtual void FilterSerializeData(
-      base::DictionaryValue* pref_store_contents) = 0;
-};
-
-#endif  // BASE_PREFS_PREF_FILTER_H_
diff --git a/base/prefs/pref_notifier.h b/base/prefs/pref_notifier.h
deleted file mode 100644
index e0df260..0000000
--- a/base/prefs/pref_notifier.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_PREF_NOTIFIER_H_
-#define BASE_PREFS_PREF_NOTIFIER_H_
-
-#include <string>
-
-// Delegate interface used by PrefValueStore to notify its owner about changes
-// to the preference values.
-// TODO(mnissler, danno): Move this declaration to pref_value_store.h once we've
-// cleaned up all public uses of this interface.
-class PrefNotifier {
- public:
-  virtual ~PrefNotifier() {}
-
-  // Sends out a change notification for the preference identified by
-  // |pref_name|.
-  virtual void OnPreferenceChanged(const std::string& pref_name) = 0;
-
-  // Broadcasts the intialization completed notification.
-  virtual void OnInitializationCompleted(bool succeeded) = 0;
-};
-
-#endif  // BASE_PREFS_PREF_NOTIFIER_H_
diff --git a/base/prefs/pref_observer.h b/base/prefs/pref_observer.h
deleted file mode 100644
index 5d8f5b6..0000000
--- a/base/prefs/pref_observer.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_PREF_OBSERVER_H_
-#define BASE_PREFS_PREF_OBSERVER_H_
-
-#include <string>
-
-class PrefService;
-
-// Used internally to the Prefs subsystem to pass preference change
-// notifications between PrefService, PrefNotifierImpl and
-// PrefChangeRegistrar.
-class PrefObserver {
- public:
-  virtual void OnPreferenceChanged(PrefService* service,
-                                   const std::string& pref_name) = 0;
-};
-
-#endif  // BASE_PREFS_PREF_OBSERVER_H_
diff --git a/base/prefs/writeable_pref_store.h b/base/prefs/writeable_pref_store.h
deleted file mode 100644
index f7da279..0000000
--- a/base/prefs/writeable_pref_store.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_WRITEABLE_PREF_STORE_H_
-#define BASE_PREFS_WRITEABLE_PREF_STORE_H_
-
-#include <stdint.h>
-
-#include <string>
-
-#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/prefs/pref_store.h"
-
-namespace base {
-class Value;
-}
-
-// A pref store that can be written to as well as read from.
-class BASE_PREFS_EXPORT WriteablePrefStore : public PrefStore {
- public:
-  // PrefWriteFlags can be used to change the way a pref will be written to
-  // storage.
-  enum PrefWriteFlags : uint32_t {
-    // No flags are specified.
-    DEFAULT_PREF_WRITE_FLAGS = 0,
-
-    // This marks the pref as "lossy". There is no strict time guarantee on when
-    // a lossy pref will be persisted to permanent storage when it is modified.
-    LOSSY_PREF_WRITE_FLAG = 1 << 1
-  };
-
-  WriteablePrefStore() {}
-
-  // Sets a |value| for |key| in the store. |value| must be non-NULL. |flags| is
-  // a bitmask of PrefWriteFlags.
-  virtual void SetValue(const std::string& key,
-                        scoped_ptr<base::Value> value,
-                        uint32_t flags) = 0;
-
-  // Removes the value for |key|.
-  virtual void RemoveValue(const std::string& key, uint32_t flags) = 0;
-
-  // Equivalent to PrefStore::GetValue but returns a mutable value.
-  virtual bool GetMutableValue(const std::string& key,
-                               base::Value** result) = 0;
-
-  // Triggers a value changed notification. This function needs to be called
-  // if one retrieves a list or dictionary with GetMutableValue and change its
-  // value. SetValue takes care of notifications itself. Note that
-  // ReportValueChanged will trigger notifications even if nothing has changed.
-  // |flags| is a bitmask of PrefWriteFlags.
-  virtual void ReportValueChanged(const std::string& key, uint32_t flags) = 0;
-
-  // Same as SetValue, but doesn't generate notifications. This is used by
-  // PrefService::GetMutableUserPref() in order to put empty entries
-  // into the user pref store. Using SetValue is not an option since existing
-  // tests rely on the number of notifications generated. |flags| is a bitmask
-  // of PrefWriteFlags.
-  virtual void SetValueSilently(const std::string& key,
-                                scoped_ptr<base::Value> value,
-                                uint32_t flags) = 0;
-
- protected:
-  ~WriteablePrefStore() override {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(WriteablePrefStore);
-};
-
-#endif  // BASE_PREFS_WRITEABLE_PREF_STORE_H_
diff --git a/base/process/launch.cc b/base/process/launch.cc
index f09317d..3ca5155 100644
--- a/base/process/launch.cc
+++ b/base/process/launch.cc
@@ -40,6 +40,8 @@
     {
 }
 
+LaunchOptions::LaunchOptions(const LaunchOptions& other) = default;
+
 LaunchOptions::~LaunchOptions() {
 }
 
diff --git a/base/process/launch.h b/base/process/launch.h
index 9a76e20..b8c0259 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -59,16 +59,20 @@
 #endif  // defined(OS_POSIX)
 
   LaunchOptions();
+  LaunchOptions(const LaunchOptions&);
   ~LaunchOptions();
 
   // If true, wait for the process to complete.
   bool wait;
 
+  // If not empty, change to this directory before executing the new process.
+  base::FilePath current_directory;
+
 #if defined(OS_WIN)
   bool start_hidden;
 
   // If non-null, inherit exactly the list of handles in this vector (these
-  // handles must be inheritable). This is only supported on Vista and higher.
+  // handles must be inheritable).
   HandlesToInheritVector* handles_to_inherit;
 
   // If true, the new process inherits handles from the parent. In production
@@ -76,7 +80,7 @@
   // binaries, because open handles from other libraries and subsystems will
   // leak to the child process, causing errors such as open socket hangs.
   // Note: If |handles_to_inherit| is non-null, this flag is ignored and only
-  // those handles will be inherited (on Vista and higher).
+  // those handles will be inherited.
   bool inherit_handles;
 
   // If non-null, runs as if the user represented by the token had launched it.
@@ -150,9 +154,6 @@
 #endif  // defined(OS_LINUX)
 
 #if defined(OS_POSIX)
-  // If not empty, change to this directory before execing the new process.
-  base::FilePath current_directory;
-
   // If non-null, a delegate to be run immediately prior to executing the new
   // program in the child process.
   //
@@ -256,12 +257,6 @@
 BASE_EXPORT bool GetAppOutput(const std::vector<std::string>& argv,
                               std::string* output);
 
-// A restricted version of |GetAppOutput()| which (a) clears the environment,
-// and (b) stores at most |max_output| bytes; also, it doesn't search the path
-// for the command.
-BASE_EXPORT bool GetAppOutputRestricted(const CommandLine& cl,
-                                        std::string* output, size_t max_output);
-
 // A version of |GetAppOutput()| which also returns the exit code of the
 // executed command. Returns true if the application runs and exits cleanly. If
 // this is the case the exit code of the application is available in
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index 6a2f5ce..4fb1018 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -22,6 +22,7 @@
 
 #include <iterator>
 #include <limits>
+#include <memory>
 #include <set>
 
 #include "base/command_line.h"
@@ -32,16 +33,16 @@
 #include "base/files/file_util.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/process.h"
 #include "base/process/process_metrics.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/waitable_event.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_restrictions.h"
 #include "build/build_config.h"
-#include "third_party/valgrind/valgrind.h"
 
 #if defined(OS_LINUX)
 #include <sys/prctl.h>
@@ -151,13 +152,17 @@
 // This function is intended to be used in between fork() and execve() and will
 // reset all signal handlers to the default.
 // The motivation for going through all of them is that sa_restorer can leak
-// from parents and help defeat ASLR on buggy kernels.  We reset it to NULL.
+// from parents and help defeat ASLR on buggy kernels.  We reset it to null.
 // See crbug.com/177956.
 void ResetChildSignalHandlersToDefaults(void) {
   for (int signum = 1; ; ++signum) {
+#if defined(ANDROID)
     struct kernel_sigaction act;
     memset(&act, 0, sizeof(act));
-    int sigaction_get_ret = sys_rt_sigaction(signum, NULL, &act);
+#else
+    struct kernel_sigaction act = {0};
+#endif
+    int sigaction_get_ret = sys_rt_sigaction(signum, nullptr, &act);
     if (sigaction_get_ret && errno == EINVAL) {
 #if !defined(NDEBUG)
       // Linux supports 32 real-time signals from 33 to 64.
@@ -176,14 +181,14 @@
     // The kernel won't allow to re-set SIGKILL or SIGSTOP.
     if (signum != SIGSTOP && signum != SIGKILL) {
       act.k_sa_handler = reinterpret_cast<void*>(SIG_DFL);
-      act.k_sa_restorer = NULL;
-      if (sys_rt_sigaction(signum, &act, NULL)) {
+      act.k_sa_restorer = nullptr;
+      if (sys_rt_sigaction(signum, &act, nullptr)) {
         RAW_LOG(FATAL, "sigaction (set) failed.");
       }
     }
 #if !defined(NDEBUG)
     // Now ask the kernel again and check that no restorer will leak.
-    if (sys_rt_sigaction(signum, NULL, &act) || act.k_sa_restorer) {
+    if (sys_rt_sigaction(signum, nullptr, &act) || act.k_sa_restorer) {
       RAW_LOG(FATAL, "Cound not fix sa_restorer.");
     }
 #endif  // !defined(NDEBUG)
@@ -202,7 +207,7 @@
 };
 
 // Automatically closes |DIR*|s.
-typedef scoped_ptr<DIR, ScopedDIRClose> ScopedDIR;
+typedef std::unique_ptr<DIR, ScopedDIRClose> ScopedDIR;
 
 #if defined(OS_LINUX)
 static const char kFDDir[] = "/proc/self/fd";
@@ -301,14 +306,14 @@
   fd_shuffle1.reserve(fd_shuffle_size);
   fd_shuffle2.reserve(fd_shuffle_size);
 
-  scoped_ptr<char* []> argv_cstr(new char* [argv.size() + 1]);
+  std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
   for (size_t i = 0; i < argv.size(); i++) {
     argv_cstr[i] = const_cast<char*>(argv[i].c_str());
   }
-  argv_cstr[argv.size()] = NULL;
+  argv_cstr[argv.size()] = nullptr;
 
-  scoped_ptr<char*[]> new_environ;
-  char* const empty_environ = NULL;
+  std::unique_ptr<char* []> new_environ;
+  char* const empty_environ = nullptr;
   char* const* old_environ = GetEnvironment();
   if (options.clear_environ)
     old_environ = &empty_environ;
@@ -430,7 +435,7 @@
       // Set process' controlling terminal.
       if (HANDLE_EINTR(setsid()) != -1) {
         if (HANDLE_EINTR(
-                ioctl(options.ctrl_terminal_fd, TIOCSCTTY, NULL)) == -1) {
+                ioctl(options.ctrl_terminal_fd, TIOCSCTTY, nullptr)) == -1) {
           RAW_LOG(WARNING, "ioctl(TIOCSCTTY), ctrl terminal not set");
         }
       } else {
@@ -511,14 +516,6 @@
   // setpriority() or sched_getscheduler, but these all require extra rights.
 }
 
-// Return value used by GetAppOutputInternal to encapsulate the various exit
-// scenarios from the function.
-enum GetAppOutputInternalResult {
-  EXECUTE_FAILURE,
-  EXECUTE_SUCCESS,
-  GOT_MAX_OUTPUT,
-};
-
 // Executes the application specified by |argv| and wait for it to exit. Stores
 // the output (stdout) in |output|. If |do_search_path| is set, it searches the
 // path for the application; in that case, |envp| must be null, and it will use
@@ -526,21 +523,14 @@
 // specify the path of the application, and |envp| will be used as the
 // environment. If |include_stderr| is true, includes stderr otherwise redirects
 // it to /dev/null.
-// If we successfully start the application and get all requested output, we
-// return GOT_MAX_OUTPUT, or if there is a problem starting or exiting
-// the application we return RUN_FAILURE. Otherwise we return EXECUTE_SUCCESS.
-// The GOT_MAX_OUTPUT return value exists so a caller that asks for limited
-// output can treat this as a success, despite having an exit code of SIG_PIPE
-// due to us closing the output pipe.
-// In the case of EXECUTE_SUCCESS, the application exit code will be returned
-// in |*exit_code|, which should be checked to determine if the application
-// ran successfully.
-static GetAppOutputInternalResult GetAppOutputInternal(
+// The return value of the function indicates success or failure. In the case of
+// success, the application exit code will be returned in |*exit_code|, which
+// should be checked to determine if the application ran successfully.
+static bool GetAppOutputInternal(
     const std::vector<std::string>& argv,
     char* const envp[],
     bool include_stderr,
     std::string* output,
-    size_t max_output,
     bool do_search_path,
     int* exit_code) {
   // Doing a blocking wait for another command to finish counts as IO.
@@ -552,7 +542,7 @@
   int pipe_fd[2];
   pid_t pid;
   InjectiveMultimap fd_shuffle1, fd_shuffle2;
-  scoped_ptr<char*[]> argv_cstr(new char*[argv.size() + 1]);
+  std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
 
   fd_shuffle1.reserve(3);
   fd_shuffle2.reserve(3);
@@ -562,13 +552,13 @@
   DCHECK(!do_search_path ^ !envp);
 
   if (pipe(pipe_fd) < 0)
-    return EXECUTE_FAILURE;
+    return false;
 
   switch (pid = fork()) {
     case -1:  // error
       close(pipe_fd[0]);
       close(pipe_fd[1]);
-      return EXECUTE_FAILURE;
+      return false;
     case 0:  // child
       {
         // DANGER: no calls to malloc or locks are allowed from now on:
@@ -605,7 +595,7 @@
 
         for (size_t i = 0; i < argv.size(); i++)
           argv_cstr[i] = const_cast<char*>(argv[i].c_str());
-        argv_cstr[argv.size()] = NULL;
+        argv_cstr[argv.size()] = nullptr;
         if (do_search_path)
           execvp(argv_cstr[0], argv_cstr.get());
         else
@@ -620,33 +610,21 @@
         close(pipe_fd[1]);
 
         output->clear();
-        char buffer[256];
-        size_t output_buf_left = max_output;
-        ssize_t bytes_read = 1;  // A lie to properly handle |max_output == 0|
-                                 // case in the logic below.
 
-        while (output_buf_left > 0) {
-          bytes_read = HANDLE_EINTR(read(pipe_fd[0], buffer,
-                                    std::min(output_buf_left, sizeof(buffer))));
+        while (true) {
+          char buffer[256];
+          ssize_t bytes_read =
+              HANDLE_EINTR(read(pipe_fd[0], buffer, sizeof(buffer)));
           if (bytes_read <= 0)
             break;
           output->append(buffer, bytes_read);
-          output_buf_left -= static_cast<size_t>(bytes_read);
         }
         close(pipe_fd[0]);
 
         // Always wait for exit code (even if we know we'll declare
         // GOT_MAX_OUTPUT).
         Process process(pid);
-        bool success = process.WaitForExit(exit_code);
-
-        // If we stopped because we read as much as we wanted, we return
-        // GOT_MAX_OUTPUT (because the child may exit due to |SIGPIPE|).
-        if (!output_buf_left && bytes_read > 0)
-          return GOT_MAX_OUTPUT;
-        else if (success)
-          return EXECUTE_SUCCESS;
-        return EXECUTE_FAILURE;
+        return process.WaitForExit(exit_code);
       }
   }
 }
@@ -656,44 +634,27 @@
 }
 
 bool GetAppOutput(const std::vector<std::string>& argv, std::string* output) {
-  // Run |execve()| with the current environment and store "unlimited" data.
+  // Run |execve()| with the current environment.
   int exit_code;
-  GetAppOutputInternalResult result = GetAppOutputInternal(
-      argv, NULL, false, output, std::numeric_limits<std::size_t>::max(), true,
-      &exit_code);
-  return result == EXECUTE_SUCCESS && exit_code == EXIT_SUCCESS;
+  bool result =
+      GetAppOutputInternal(argv, nullptr, false, output, true, &exit_code);
+  return result && exit_code == EXIT_SUCCESS;
 }
 
 bool GetAppOutputAndError(const CommandLine& cl, std::string* output) {
-  // Run |execve()| with the current environment and store "unlimited" data.
+  // Run |execve()| with the current environment.
   int exit_code;
-  GetAppOutputInternalResult result = GetAppOutputInternal(
-      cl.argv(), NULL, true, output, std::numeric_limits<std::size_t>::max(),
-      true, &exit_code);
-  return result == EXECUTE_SUCCESS && exit_code == EXIT_SUCCESS;
-}
-
-// TODO(viettrungluu): Conceivably, we should have a timeout as well, so we
-// don't hang if what we're calling hangs.
-bool GetAppOutputRestricted(const CommandLine& cl,
-                            std::string* output, size_t max_output) {
-  // Run |execve()| with the empty environment.
-  char* const empty_environ = NULL;
-  int exit_code;
-  GetAppOutputInternalResult result = GetAppOutputInternal(
-      cl.argv(), &empty_environ, false, output, max_output, false, &exit_code);
-  return result == GOT_MAX_OUTPUT || (result == EXECUTE_SUCCESS &&
-                                      exit_code == EXIT_SUCCESS);
+  bool result =
+      GetAppOutputInternal(cl.argv(), nullptr, true, output, true, &exit_code);
+  return result && exit_code == EXIT_SUCCESS;
 }
 
 bool GetAppOutputWithExitCode(const CommandLine& cl,
                               std::string* output,
                               int* exit_code) {
-  // Run |execve()| with the current environment and store "unlimited" data.
-  GetAppOutputInternalResult result = GetAppOutputInternal(
-      cl.argv(), NULL, false, output, std::numeric_limits<std::size_t>::max(),
-      true, exit_code);
-  return result == EXECUTE_SUCCESS;
+  // Run |execve()| with the current environment.
+  return GetAppOutputInternal(cl.argv(), nullptr, false, output, true,
+                              exit_code);
 }
 
 #endif  // !defined(OS_NACL_NONSFI)
@@ -736,9 +697,9 @@
   // internal pid cache. The libc interface unfortunately requires
   // specifying a new stack, so we use setjmp/longjmp to emulate
   // fork-like behavior.
-  char stack_buf[PTHREAD_STACK_MIN];
+  char stack_buf[PTHREAD_STACK_MIN] ALIGNAS(16);
 #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
-    defined(ARCH_CPU_MIPS64_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
+    defined(ARCH_CPU_MIPS_FAMILY)
   // The stack grows downward.
   void* stack = stack_buf + sizeof(stack_buf);
 #else
@@ -772,7 +733,7 @@
 #if defined(ARCH_CPU_X86_64)
     return syscall(__NR_clone, flags, nullptr, ptid, ctid, nullptr);
 #elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
-    defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_MIPS64_FAMILY)
+    defined(ARCH_CPU_MIPS_FAMILY)
     // CONFIG_CLONE_BACKWARDS defined.
     return syscall(__NR_clone, flags, nullptr, ptid, nullptr, ctid);
 #else
diff --git a/base/process/process.h b/base/process/process.h
index 75f6a00..70c8260 100644
--- a/base/process/process.h
+++ b/base/process/process.h
@@ -6,7 +6,7 @@
 #define BASE_PROCESS_PROCESS_H_
 
 #include "base/base_export.h"
-#include "base/move.h"
+#include "base/macros.h"
 #include "base/process/process_handle.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
@@ -31,8 +31,6 @@
 // the process dies, and it may be reused by the system, which means that it may
 // end up pointing to the wrong process.
 class BASE_EXPORT Process {
-  MOVE_ONLY_TYPE_FOR_CPP_03(Process)
-
  public:
   explicit Process(ProcessHandle handle = kNullProcessHandle);
 
@@ -136,6 +134,8 @@
 #else
   ProcessHandle process_;
 #endif
+
+  DISALLOW_COPY_AND_ASSIGN(Process);
 };
 
 #if defined(OS_CHROMEOS)
diff --git a/base/process/process_iterator.cc b/base/process/process_iterator.cc
index 94f53b6..d4024d9 100644
--- a/base/process/process_iterator.cc
+++ b/base/process/process_iterator.cc
@@ -9,6 +9,7 @@
 
 #if defined(OS_POSIX)
 ProcessEntry::ProcessEntry() : pid_(0), ppid_(0), gid_(0) {}
+ProcessEntry::ProcessEntry(const ProcessEntry& other) = default;
 ProcessEntry::~ProcessEntry() {}
 #endif
 
diff --git a/base/process/process_iterator.h b/base/process/process_iterator.h
index 26fe690..0d1f1a6 100644
--- a/base/process/process_iterator.h
+++ b/base/process/process_iterator.h
@@ -41,6 +41,7 @@
 #elif defined(OS_POSIX)
 struct BASE_EXPORT ProcessEntry {
   ProcessEntry();
+  ProcessEntry(const ProcessEntry& other);
   ~ProcessEntry();
 
   ProcessId pid() const { return pid_; }
diff --git a/base/process/process_iterator_linux.cc b/base/process/process_iterator_linux.cc
index 94a3576..421565f 100644
--- a/base/process/process_iterator_linux.cc
+++ b/base/process/process_iterator_linux.cc
@@ -61,18 +61,28 @@
 ProcessIterator::ProcessIterator(const ProcessFilter* filter)
     : filter_(filter) {
   procfs_dir_ = opendir(internal::kProcDir);
+  if (!procfs_dir_) {
+    // On Android, SELinux may prevent reading /proc. See
+    // https://crbug.com/581517 for details.
+    PLOG(ERROR) << "opendir " << internal::kProcDir;
+  }
 }
 
 ProcessIterator::~ProcessIterator() {
   if (procfs_dir_) {
     closedir(procfs_dir_);
-    procfs_dir_ = NULL;
+    procfs_dir_ = nullptr;
   }
 }
 
 bool ProcessIterator::CheckForNextProcess() {
   // TODO(port): skip processes owned by different UID
 
+  if (!procfs_dir_) {
+    DLOG(ERROR) << "Skipping CheckForNextProcess(), no procfs_dir_";
+    return false;
+  }
+
   pid_t pid = kNullProcessId;
   std::vector<std::string> cmd_line_args;
   std::string stats_data;
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
index a21891d..0b38726 100644
--- a/base/process/process_metrics.cc
+++ b/base/process/process_metrics.cc
@@ -31,8 +31,8 @@
   return system_metrics;
 }
 
-scoped_ptr<Value> SystemMetrics::ToValue() const {
-  scoped_ptr<DictionaryValue> res(new DictionaryValue());
+std::unique_ptr<Value> SystemMetrics::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   res->SetInteger("committed_memory", static_cast<int>(committed_memory_));
 #if defined(OS_LINUX) || defined(OS_ANDROID)
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 8d8f7fc..57cb3ab 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -236,7 +236,10 @@
 // Returns 0 if it can't compute the commit charge.
 BASE_EXPORT size_t GetSystemCommitCharge();
 
-// Returns the number of bytes in a memory page.
+// Returns the number of bytes in a memory page. Do not use this to compute
+// the number of pages in a block of memory for calling mincore(). On some
+// platforms, e.g. iOS, mincore() uses a different page size from what is
+// returned by GetPageSize().
 BASE_EXPORT size_t GetPageSize();
 
 #if defined(OS_POSIX)
@@ -261,13 +264,22 @@
 // Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
 struct BASE_EXPORT SystemMemoryInfoKB {
   SystemMemoryInfoKB();
+  SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
 
   // Serializes the platform specific fields to value.
-  scoped_ptr<Value> ToValue() const;
+  std::unique_ptr<Value> ToValue() const;
 
   int total;
   int free;
 
+#if defined(OS_LINUX)
+  // This provides an estimate of available memory as described here:
+  // https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
+  // NOTE: this is ONLY valid in kernels 3.14 and up.  Its value will always
+  // be 0 in earlier kernel versions.
+  int available;
+#endif
+
 #if !defined(OS_MACOSX)
   int swap_total;
   int swap_free;
@@ -336,9 +348,10 @@
 // Data from /proc/diskstats about system-wide disk I/O.
 struct BASE_EXPORT SystemDiskInfo {
   SystemDiskInfo();
+  SystemDiskInfo(const SystemDiskInfo& other);
 
   // Serializes the platform specific fields to value.
-  scoped_ptr<Value> ToValue() const;
+  std::unique_ptr<Value> ToValue() const;
 
   uint64_t reads;
   uint64_t reads_merged;
@@ -375,7 +388,7 @@
   }
 
   // Serializes the platform specific fields to value.
-  scoped_ptr<Value> ToValue() const;
+  std::unique_ptr<Value> ToValue() const;
 
   uint64_t num_reads;
   uint64_t num_writes;
@@ -399,7 +412,7 @@
   static SystemMetrics Sample();
 
   // Serializes the system metrics to value.
-  scoped_ptr<Value> ToValue() const;
+  std::unique_ptr<Value> ToValue() const;
 
  private:
   FRIEND_TEST_ALL_PREFIXES(SystemMetricsTest, SystemMetrics);
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index bcebcf5..3d27656 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -86,7 +86,8 @@
       return value;
     }
   }
-  NOTREACHED();
+  // This can be reached if the process dies when proc is read -- in that case,
+  // the kernel can return missing fields.
   return 0;
 }
 
@@ -533,6 +534,9 @@
 SystemMemoryInfoKB::SystemMemoryInfoKB() {
   total = 0;
   free = 0;
+#if defined(OS_LINUX)
+  available = 0;
+#endif
   buffers = 0;
   cached = 0;
   active_anon = 0;
@@ -555,11 +559,17 @@
 #endif
 }
 
-scoped_ptr<Value> SystemMemoryInfoKB::ToValue() const {
-  scoped_ptr<DictionaryValue> res(new DictionaryValue());
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+    default;
+
+std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   res->SetInteger("total", total);
   res->SetInteger("free", free);
+#if defined(OS_LINUX)
+  res->SetInteger("available", available);
+#endif
   res->SetInteger("buffers", buffers);
   res->SetInteger("cached", cached);
   res->SetInteger("active_anon", active_anon);
@@ -617,6 +627,10 @@
       target = &meminfo->total;
     else if (tokens[0] == "MemFree:")
       target = &meminfo->free;
+#if defined(OS_LINUX)
+    else if (tokens[0] == "MemAvailable:")
+      target = &meminfo->available;
+#endif
     else if (tokens[0] == "Buffers:")
       target = &meminfo->buffers;
     else if (tokens[0] == "Cached:")
@@ -766,8 +780,10 @@
   weighted_io_time = 0;
 }
 
-scoped_ptr<Value> SystemDiskInfo::ToValue() const {
-  scoped_ptr<DictionaryValue> res(new DictionaryValue());
+SystemDiskInfo::SystemDiskInfo(const SystemDiskInfo& other) = default;
+
+std::unique_ptr<Value> SystemDiskInfo::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   // Write out uint64_t variables as doubles.
   // Note: this may discard some precision, but for JS there's no other option.
@@ -892,8 +908,8 @@
 }
 
 #if defined(OS_CHROMEOS)
-scoped_ptr<Value> SwapInfo::ToValue() const {
-  scoped_ptr<DictionaryValue> res(new DictionaryValue());
+std::unique_ptr<Value> SwapInfo::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   // Write out uint64_t variables as doubles.
   // Note: this may discard some precision, but for JS there's no other option.
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
index d947ce7..8b5d564 100644
--- a/base/process/process_metrics_mac.cc
+++ b/base/process/process_metrics_mac.cc
@@ -84,6 +84,9 @@
   free = 0;
 }
 
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+    default;
+
 // Getting a mach task from a pid for another process requires permissions in
 // general, so there doesn't really seem to be a way to do these (and spinning
 // up ps to fetch each stats seems dangerous to put in a base api for anyone to
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index 96ba6ce..94a2ffe 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -303,7 +303,7 @@
 // calls to it.
 TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
   ProcessHandle handle = GetCurrentProcessHandle();
-  scoped_ptr<ProcessMetrics> metrics(
+  std::unique_ptr<ProcessMetrics> metrics(
       ProcessMetrics::CreateProcessMetrics(handle));
 
   EXPECT_GE(metrics->GetCPUUsage(), 0.0);
@@ -424,7 +424,7 @@
   ASSERT_GT(initial_threads, 0);
   const int kNumAdditionalThreads = 10;
   {
-    scoped_ptr<Thread> my_threads[kNumAdditionalThreads];
+    std::unique_ptr<Thread> my_threads[kNumAdditionalThreads];
     for (int i = 0; i < kNumAdditionalThreads; ++i) {
       my_threads[i].reset(new Thread("GetNumberOfThreadsTest"));
       my_threads[i]->Start();
@@ -496,9 +496,21 @@
   ASSERT_TRUE(child.IsValid());
   WaitForEvent(temp_path, kSignalClosed);
 
-  scoped_ptr<ProcessMetrics> metrics(
+  std::unique_ptr<ProcessMetrics> metrics(
       ProcessMetrics::CreateProcessMetrics(child.Handle()));
-  EXPECT_EQ(0, metrics->GetOpenFdCount());
+  // Try a couple times to observe the child with 0 fds open.
+  // Sometimes we've seen that the child can have 1 remaining
+  // fd shortly after receiving the signal.  Potentially this
+  // is actually the signal file still open in the child.
+  int open_fds = -1;
+  for (int tries = 0; tries < 5; ++tries) {
+    open_fds = metrics->GetOpenFdCount();
+    if (!open_fds) {
+      break;
+    }
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+  }
+  EXPECT_EQ(0, open_fds);
   ASSERT_TRUE(child.Terminate(0, true));
 }
 #endif  // defined(OS_LINUX)
diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc
index 248fc80..ba9b544 100644
--- a/base/process/process_posix.cc
+++ b/base/process/process_posix.cc
@@ -13,6 +13,7 @@
 #include "base/logging.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/kill.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "build/build_config.h"
 
 #if defined(OS_MACOSX)
@@ -100,8 +101,12 @@
     return false;
   }
 
+#if defined(ANDROID)
   struct kevent change;
   memset(&change, 0, sizeof(change));
+#else
+  struct kevent change = {0};
+#endif
   EV_SET(&change, handle, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
   int result = HANDLE_EINTR(kevent(kq.get(), &change, 1, NULL, 0, NULL));
   if (result == -1) {
@@ -125,8 +130,12 @@
   }
 
   result = -1;
+#if defined(ANDROID)
   struct kevent event;
   memset(&event, 0, sizeof(event));
+#else
+  struct kevent event = {0};
+#endif
 
   while (wait_forever || remaining_delta > base::TimeDelta()) {
     struct timespec remaining_timespec;
@@ -296,7 +305,7 @@
 }
 
 #if !defined(OS_NACL_NONSFI)
-bool Process::Terminate(int /* exit_code */, bool wait) const {
+bool Process::Terminate(int /*exit_code*/, bool wait) const {
   // exit_code isn't supportable.
   DCHECK(IsValid());
   CHECK_GT(process_, 0);
diff --git a/base/profiler/alternate_timer.cc b/base/profiler/alternate_timer.cc
deleted file mode 100644
index b2d2c70..0000000
--- a/base/profiler/alternate_timer.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/profiler/alternate_timer.h"
-
-namespace {
-
-tracked_objects::NowFunction* g_time_function = nullptr;
-tracked_objects::TimeSourceType g_time_source_type =
-    tracked_objects::TIME_SOURCE_TYPE_WALL_TIME;
-
-}  // anonymous namespace
-
-namespace tracked_objects {
-
-const char kAlternateProfilerTime[] = "CHROME_PROFILER_TIME";
-
-// Set an alternate timer function to replace the OS time function when
-// profiling.
-void SetAlternateTimeSource(NowFunction* now_function, TimeSourceType type) {
-  g_time_function = now_function;
-  g_time_source_type = type;
-}
-
-NowFunction* GetAlternateTimeSource() {
-  return g_time_function;
-}
-
-TimeSourceType GetTimeSourceType() {
-  return g_time_source_type;
-}
-
-}  // namespace tracked_objects
diff --git a/base/profiler/alternate_timer.h b/base/profiler/alternate_timer.h
deleted file mode 100644
index fdc75dc..0000000
--- a/base/profiler/alternate_timer.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a glue file, which allows third party code to call into our profiler
-// without having to include most any functions from base.
-
-#ifndef BASE_PROFILER_ALTERNATE_TIMER_H_
-#define BASE_PROFILER_ALTERNATE_TIMER_H_
-
-#include "base/base_export.h"
-
-namespace tracked_objects {
-
-enum TimeSourceType {
-  TIME_SOURCE_TYPE_WALL_TIME,
-  TIME_SOURCE_TYPE_TCMALLOC
-};
-
-// Provide type for an alternate timer function.
-typedef unsigned int NowFunction();
-
-// Environment variable name that is used to activate alternate timer profiling
-// (such as using TCMalloc allocations to provide a pseudo-timer) for tasks
-// instead of wall clock profiling.
-BASE_EXPORT extern const char kAlternateProfilerTime[];
-
-// Set an alternate timer function to replace the OS time function when
-// profiling.  Typically this is called by an allocator that is providing a
-// function that indicates how much memory has been allocated on any given
-// thread.
-BASE_EXPORT void SetAlternateTimeSource(NowFunction* now_function,
-                                        TimeSourceType type);
-
-// Gets the pointer to a function that was set via SetAlternateTimeSource().
-// Returns NULL if no set was done prior to calling GetAlternateTimeSource.
-NowFunction* GetAlternateTimeSource();
-
-// Returns the type of the currently set time source.
-BASE_EXPORT TimeSourceType GetTimeSourceType();
-
-}  // namespace tracked_objects
-
-#endif  // BASE_PROFILER_ALTERNATE_TIMER_H_
diff --git a/base/rand_util_unittest.cc b/base/rand_util_unittest.cc
index ea803ee..4f46b80 100644
--- a/base/rand_util_unittest.cc
+++ b/base/rand_util_unittest.cc
@@ -9,9 +9,9 @@
 
 #include <algorithm>
 #include <limits>
+#include <memory>
 
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -143,7 +143,7 @@
   const int kTestIterations = 10;
   const size_t kTestBufferSize = 1 * 1024 * 1024;
 
-  scoped_ptr<uint8_t[]> buffer(new uint8_t[kTestBufferSize]);
+  std::unique_ptr<uint8_t[]> buffer(new uint8_t[kTestBufferSize]);
   const base::TimeTicks now = base::TimeTicks::Now();
   for (int i = 0; i < kTestIterations; ++i)
     base::RandBytes(buffer.get(), kTestBufferSize);
diff --git a/base/run_loop.cc b/base/run_loop.cc
index b8558db..a2322f8 100644
--- a/base/run_loop.cc
+++ b/base/run_loop.cc
@@ -8,10 +8,6 @@
 #include "base/tracked_objects.h"
 #include "build/build_config.h"
 
-#if defined(OS_WIN)
-#include "base/message_loop/message_pump_dispatcher.h"
-#endif
-
 namespace base {
 
 RunLoop::RunLoop()
@@ -23,25 +19,8 @@
       running_(false),
       quit_when_idle_received_(false),
       weak_factory_(this) {
-#if defined(OS_WIN)
-   dispatcher_ = NULL;
-#endif
 }
 
-#if defined(OS_WIN)
-RunLoop::RunLoop(MessagePumpDispatcher* dispatcher)
-    : loop_(MessageLoop::current()),
-      previous_run_loop_(NULL),
-      dispatcher_(dispatcher),
-      run_depth_(0),
-      run_called_(false),
-      quit_called_(false),
-      running_(false),
-      quit_when_idle_received_(false),
-      weak_factory_(this) {
-}
-#endif
-
 RunLoop::~RunLoop() {
 }
 
@@ -72,10 +51,18 @@
   }
 }
 
+void RunLoop::QuitWhenIdle() {
+  quit_when_idle_received_ = true;
+}
+
 base::Closure RunLoop::QuitClosure() {
   return base::Bind(&RunLoop::Quit, weak_factory_.GetWeakPtr());
 }
 
+base::Closure RunLoop::QuitWhenIdleClosure() {
+  return base::Bind(&RunLoop::QuitWhenIdle, weak_factory_.GetWeakPtr());
+}
+
 bool RunLoop::BeforeRun() {
   DCHECK(!run_called_);
   run_called_ = true;
@@ -89,6 +76,9 @@
   run_depth_ = previous_run_loop_? previous_run_loop_->run_depth_ + 1 : 1;
   loop_->run_loop_ = this;
 
+  if (run_depth_ > 1)
+    loop_->NotifyBeginNestedLoop();
+
   running_ = true;
   return true;
 }
diff --git a/base/run_loop.h b/base/run_loop.h
index e23d073..635018f 100644
--- a/base/run_loop.h
+++ b/base/run_loop.h
@@ -17,10 +17,6 @@
 class MessagePumpForUI;
 #endif
 
-#if defined(OS_WIN)
-class MessagePumpDispatcher;
-#endif
-
 #if defined(OS_IOS)
 class MessagePumpUIApplication;
 #endif
@@ -33,15 +29,12 @@
 class BASE_EXPORT RunLoop {
  public:
   RunLoop();
-#if defined(OS_WIN)
-  explicit RunLoop(MessagePumpDispatcher* dispatcher);
-#endif
   ~RunLoop();
 
   // Run the current MessageLoop. This blocks until Quit is called. Before
-  // calling Run, be sure to grab an AsWeakPtr or the QuitClosure in order to
-  // stop the MessageLoop asynchronously. MessageLoop::QuitWhenIdle and QuitNow
-  // will also trigger a return from Run, but those are deprecated.
+  // calling Run, be sure to grab the QuitClosure in order to stop the
+  // MessageLoop asynchronously. MessageLoop::QuitWhenIdle and QuitNow will also
+  // trigger a return from Run, but those are deprecated.
   void Run();
 
   // Run the current MessageLoop until it doesn't find any tasks or messages in
@@ -51,26 +44,32 @@
 
   bool running() const { return running_; }
 
-  // Quit an earlier call to Run(). There can be other nested RunLoops servicing
-  // the same task queue (MessageLoop); Quitting one RunLoop has no bearing on
-  // the others. Quit can be called before, during or after Run. If called
-  // before Run, Run will return immediately when called. Calling Quit after the
-  // RunLoop has already finished running has no effect.
+  // Quit() quits an earlier call to Run() immediately. QuitWhenIdle() quits an
+  // earlier call to Run() when there aren't any tasks or messages in the queue.
   //
-  // WARNING: You must NEVER assume that a call to Quit will terminate the
-  // targetted message loop. If a nested message loop continues running, the
-  // target may NEVER terminate. It is very easy to livelock (run forever) in
-  // such a case.
+  // There can be other nested RunLoops servicing the same task queue
+  // (MessageLoop); Quitting one RunLoop has no bearing on the others. Quit()
+  // and QuitWhenIdle() can be called before, during or after Run(). If called
+  // before Run(), Run() will return immediately when called. Calling Quit() or
+  // QuitWhenIdle() after the RunLoop has already finished running has no
+  // effect.
+  //
+  // WARNING: You must NEVER assume that a call to Quit() or QuitWhenIdle() will
+  // terminate the targeted message loop. If a nested message loop continues
+  // running, the target may NEVER terminate. It is very easy to livelock (run
+  // forever) in such a case.
   void Quit();
+  void QuitWhenIdle();
 
-  // Convenience method to get a closure that safely calls Quit (has no effect
-  // if the RunLoop instance is gone).
+  // Convenience methods to get a closure that safely calls Quit() or
+  // QuitWhenIdle() (has no effect if the RunLoop instance is gone).
   //
   // Example:
   //   RunLoop run_loop;
   //   PostTask(run_loop.QuitClosure());
   //   run_loop.Run();
   base::Closure QuitClosure();
+  base::Closure QuitWhenIdleClosure();
 
  private:
   friend class MessageLoop;
@@ -95,10 +94,6 @@
   // Parent RunLoop or NULL if this is the top-most RunLoop.
   RunLoop* previous_run_loop_;
 
-#if defined(OS_WIN)
-  MessagePumpDispatcher* dispatcher_;
-#endif
-
   // Used to count how many nested Run() invocations are on the stack.
   int run_depth_;
 
diff --git a/base/scoped_generic.h b/base/scoped_generic.h
index d41f195..84de6b7 100644
--- a/base/scoped_generic.h
+++ b/base/scoped_generic.h
@@ -11,7 +11,6 @@
 
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/move.h"
 
 namespace base {
 
@@ -54,8 +53,6 @@
 //   typedef ScopedGeneric<int, FooScopedTraits> ScopedFoo;
 template<typename T, typename Traits>
 class ScopedGeneric {
-  DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(ScopedGeneric)
-
  private:
   // This must be first since it's used inline below.
   //
@@ -160,6 +157,8 @@
       const ScopedGeneric<T2, Traits2>& p2) const;
 
   Data data_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedGeneric);
 };
 
 template<class T, class Traits>
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index eebe6e0..af9d2bf 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -12,10 +12,11 @@
 
 #include <algorithm>
 #include <limits>
+#include <memory>
 
 #include "base/files/file_util.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/free_deleter.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -86,7 +87,7 @@
   }
 }
 
-#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_MACOSX)
+#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_LINUX)
 #define MAYBE_NewOverflow DISABLED_NewOverflow
 #else
 #define MAYBE_NewOverflow NewOverflow
@@ -94,7 +95,8 @@
 // Test array[TooBig][X] and array[X][TooBig] allocations for int overflows.
 // IOS doesn't honor nothrow, so disable the test there.
 // Crashes on Windows Dbg builds, disable there as well.
-// Fails on Mac 10.8 http://crbug.com/227092
+// Disabled on Linux because failing Linux Valgrind bot, and Valgrind exclusions
+// are not currently read. See http://crbug.com/582398
 TEST(SecurityTest, MAYBE_NewOverflow) {
   const size_t kArraySize = 4096;
   // We want something "dynamic" here, so that the compiler doesn't
@@ -107,8 +109,8 @@
   const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
   const size_t kDynamicArraySize2 = HideValueFromCompiler(kArraySize2);
   {
-    scoped_ptr<char[][kArraySize]> array_pointer(new (nothrow)
-        char[kDynamicArraySize2][kArraySize]);
+    std::unique_ptr<char[][kArraySize]> array_pointer(
+        new (nothrow) char[kDynamicArraySize2][kArraySize]);
     OverflowTestsSoftExpectTrue(!array_pointer);
   }
   // On windows, the compiler prevents static array sizes of more than
@@ -117,8 +119,8 @@
   ALLOW_UNUSED_LOCAL(kDynamicArraySize);
 #else
   {
-    scoped_ptr<char[][kArraySize2]> array_pointer(new (nothrow)
-        char[kDynamicArraySize][kArraySize2]);
+    std::unique_ptr<char[][kArraySize2]> array_pointer(
+        new (nothrow) char[kDynamicArraySize][kArraySize2]);
     OverflowTestsSoftExpectTrue(!array_pointer);
   }
 #endif  // !defined(OS_WIN) || !defined(ARCH_CPU_64_BITS)
@@ -154,7 +156,7 @@
   // 1 MB should get us past what TCMalloc pre-allocated before initializing
   // the sophisticated allocators.
   size_t kAllocSize = 1<<20;
-  scoped_ptr<char, base::FreeDeleter> ptr(
+  std::unique_ptr<char, base::FreeDeleter> ptr(
       static_cast<char*>(malloc(kAllocSize)));
   ASSERT_TRUE(ptr != NULL);
   // If two pointers are separated by less than 512MB, they are considered
diff --git a/base/sequence_checker_unittest.cc b/base/sequence_checker_unittest.cc
index e261b04..196bb1c 100644
--- a/base/sequence_checker_unittest.cc
+++ b/base/sequence_checker_unittest.cc
@@ -6,6 +6,7 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
@@ -14,7 +15,6 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/single_thread_task_runner.h"
 #include "base/test/sequenced_worker_pool_owner.h"
 #include "base/threading/thread.h"
@@ -94,10 +94,9 @@
   }
 
   void PostDeleteToOtherThread(
-      scoped_ptr<SequenceCheckedObject> sequence_checked_object) {
-    other_thread()->message_loop()->DeleteSoon(
-        FROM_HERE,
-        sequence_checked_object.release());
+      std::unique_ptr<SequenceCheckedObject> sequence_checked_object) {
+    other_thread()->message_loop()->task_runner()->DeleteSoon(
+        FROM_HERE, sequence_checked_object.release());
   }
 
   // Destroys the SequencedWorkerPool instance, blocking until it is fully shut
@@ -115,11 +114,11 @@
  private:
   MessageLoop message_loop_;  // Needed by SequencedWorkerPool to function.
   base::Thread other_thread_;
-  scoped_ptr<SequencedWorkerPoolOwner> pool_owner_;
+  std::unique_ptr<SequencedWorkerPoolOwner> pool_owner_;
 };
 
 TEST_F(SequenceCheckerTest, CallsAllowedOnSameThread) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // Verify that DoStuff doesn't assert.
@@ -130,7 +129,7 @@
 }
 
 TEST_F(SequenceCheckerTest, DestructorAllowedOnDifferentThread) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // Verify the destructor doesn't assert when called on a different thread.
@@ -139,7 +138,7 @@
 }
 
 TEST_F(SequenceCheckerTest, DetachFromSequence) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // Verify that DoStuff doesn't assert when called on a different thread after
@@ -151,7 +150,7 @@
 }
 
 TEST_F(SequenceCheckerTest, SameSequenceTokenValid) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
@@ -166,7 +165,7 @@
 }
 
 TEST_F(SequenceCheckerTest, DetachSequenceTokenValid) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
@@ -186,7 +185,7 @@
 #if GTEST_HAS_DEATH_TEST || !ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::MethodOnDifferentThreadDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // DoStuff should assert in debug builds only when called on a
@@ -210,7 +209,7 @@
 #endif  // ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::DetachThenCallFromDifferentThreadDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // DoStuff doesn't assert when called on a different thread
@@ -239,7 +238,7 @@
 #endif  // ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::DifferentSequenceTokensDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
@@ -268,7 +267,7 @@
 #endif  // ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::WorkerPoolAndSimpleThreadDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
@@ -295,7 +294,7 @@
 #endif  // ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::TwoDifferentWorkerPoolsDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
diff --git a/base/strings/safe_sprintf_unittest.cc b/base/strings/safe_sprintf_unittest.cc
new file mode 100644
index 0000000..1a21728
--- /dev/null
+++ b/base/strings/safe_sprintf_unittest.cc
@@ -0,0 +1,763 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/safe_sprintf.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests on Android are currently very flaky. No need to add more flaky
+// tests, as they just make it hard to spot real problems.
+// TODO(markus): See if the restrictions on Android can eventually be lifted.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define ALLOW_DEATH_TEST
+#endif
+
+namespace base {
+namespace strings {
+
+TEST(SafeSPrintfTest, Empty) {
+  char buf[2] = { 'X', 'X' };
+
+  // Negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 1, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // A larger buffer should leave the trailing bytes unchanged.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 2, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(0, SafeSPrintf(buf, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+}
+
+TEST(SafeSPrintfTest, NoArguments) {
+  // Output a text message that doesn't require any substitutions. This
+  // is roughly equivalent to calling strncpy() (but unlike strncpy(), it does
+  // always add a trailing NUL; it always deduplicates '%' characters).
+  static const char text[] = "hello world";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(ref));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 1, text));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 2, text));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, sizeof(buf), text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%X"));
+#if defined(NDEBUG)
+  EXPECT_EQ(1, SafeSPrintf(buf, "%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%X"));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%X"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%X"), "src.1. == '%'");
+#endif
+}
+
+TEST(SafeSPrintfTest, OneArgument) {
+  // Test basic single-argument single-character substitution.
+  const char text[] = "hello world";
+  const char fmt[]  = "hello%cworld";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(buf));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, 1, fmt, ' '));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, 2, fmt, ' '));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, sizeof(buf), fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%Y", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%Y", 0));
+#if defined(NDEBUG)
+  EXPECT_EQ(1, SafeSPrintf(buf, "%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%", 0), "ch");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, MissingArg) {
+#if defined(NDEBUG)
+  char buf[20];
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c", 'A'));
+  EXPECT_EQ("A%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  char buf[20];
+  EXPECT_DEATH(SafeSPrintf(buf, "%c%c", 'A'), "cur_arg < max_args");
+#endif
+}
+
+TEST(SafeSPrintfTest, ASANFriendlyBufferTest) {
+  // Print into a buffer that is sized exactly to size. ASAN can verify that
+  // nobody attempts to write past the end of the buffer.
+  // There is a more complicated test in PrintLongString() that covers a lot
+  // more edge case, but it is also harder to debug in case of a failure.
+  const char kTestString[] = "This is a test";
+  std::unique_ptr<char[]> buf(new char[sizeof(kTestString)]);
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), "%s", kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+}
+
+TEST(SafeSPrintfTest, NArgs) {
+  // Pre-C++11 compilers have a different code path, that can only print
+  // up to ten distinct arguments.
+  // We test both SafeSPrintf() and SafeSNPrintf(). This makes sure we don't
+  // have typos in the copy-n-pasted code that is needed to deal with various
+  // numbers of arguments.
+  char buf[12];
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSPrintf(buf, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c",
+                           1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+  // Repeat all the tests with SafeSNPrintf() instead of SafeSPrintf().
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+  EXPECT_EQ(1, SafeSNPrintf(buf, 11, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSNPrintf(buf, 11, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSNPrintf(buf, 11, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSNPrintf(buf, 11, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSNPrintf(buf, 11, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c%c",
+                             1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+
+  EXPECT_EQ(11, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+  EXPECT_EQ(11, SafeSNPrintf(buf, 12, "%c%c%c%c%c%c%c%c%c%c%c",
+                             1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+}
+
+TEST(SafeSPrintfTest, DataTypes) {
+  char buf[40];
+
+  // Bytes
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint8_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%d", (uint8_t)-1));
+  EXPECT_EQ("255", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int8_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int8_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%d", (int8_t)-128));
+  EXPECT_EQ("-128", std::string(buf));
+
+  // Half-words
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint16_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%d", (uint16_t)-1));
+  EXPECT_EQ("65535", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int16_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int16_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%d", (int16_t)-32768));
+  EXPECT_EQ("-32768", std::string(buf));
+
+  // Words
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint32_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%d", (uint32_t)-1));
+  EXPECT_EQ("4294967295", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int32_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int32_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  // Work-around for an limitation of C90
+  EXPECT_EQ(11, SafeSPrintf(buf, "%d", (int32_t)-2147483647-1));
+  EXPECT_EQ("-2147483648", std::string(buf));
+
+  // Quads
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint64_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (uint64_t)-1));
+  EXPECT_EQ("18446744073709551615", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int64_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int64_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  // Work-around for an limitation of C90
+  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (int64_t)-9223372036854775807LL-1));
+  EXPECT_EQ("-9223372036854775808", std::string(buf));
+
+  // Strings (both const and mutable).
+  EXPECT_EQ(4, SafeSPrintf(buf, "test"));
+  EXPECT_EQ("test", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, buf));
+  EXPECT_EQ("test", std::string(buf));
+
+  // Pointer
+  char addr[20];
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+  SafeSPrintf(buf, "%p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  SafeSPrintf(buf, "%p", (const char *)buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)sprintf);
+  SafeSPrintf(buf, "%p", sprintf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+
+  // Padding for pointers is a little more complicated because of the "0x"
+  // prefix. Padding with '0' zeros is relatively straight-forward, but
+  // padding with ' ' spaces requires more effort.
+  sprintf(addr, "0x%017llX", (unsigned long long)(uintptr_t)buf);
+  SafeSPrintf(buf, "%019p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+  memset(addr, ' ',
+         (char*)memmove(addr + sizeof(addr) - strlen(addr) - 1,
+                        addr, strlen(addr)+1) - addr);
+  SafeSPrintf(buf, "%19p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+}
+
+namespace {
+void PrintLongString(char* buf, size_t sz) {
+  // Output a reasonably complex expression into a limited-size buffer.
+  // At least one byte is available for writing the NUL character.
+  CHECK_GT(sz, static_cast<size_t>(0));
+
+  // Allocate slightly more space, so that we can verify that SafeSPrintf()
+  // never writes past the end of the buffer.
+  std::unique_ptr<char[]> tmp(new char[sz + 2]);
+  memset(tmp.get(), 'X', sz+2);
+
+  // Use SafeSPrintf() to output a complex list of arguments:
+  // - test padding and truncating %c single characters.
+  // - test truncating %s simple strings.
+  // - test mismatching arguments and truncating (for %d != %s).
+  // - test zero-padding and truncating %x hexadecimal numbers.
+  // - test outputting and truncating %d MININT.
+  // - test outputting and truncating %p arbitrary pointer values.
+  // - test outputting, padding and truncating NULL-pointer %s strings.
+  char* out = tmp.get();
+  size_t out_sz = sz;
+  size_t len;
+  for (std::unique_ptr<char[]> perfect_buf;;) {
+    size_t needed = SafeSNPrintf(out, out_sz,
+#if defined(NDEBUG)
+                            "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
+#else
+                            "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
+#endif
+                            0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
+                            PrintLongString, static_cast<char*>(NULL)) + 1;
+
+    // Various sanity checks:
+    // The numbered of characters needed to print the full string should always
+    // be bigger or equal to the bytes that have actually been output.
+    len = strlen(tmp.get());
+    CHECK_GE(needed, len+1);
+
+    // The number of characters output should always fit into the buffer that
+    // was passed into SafeSPrintf().
+    CHECK_LT(len, out_sz);
+
+    // The output is always terminated with a NUL byte (actually, this test is
+    // always going to pass, as strlen() already verified this)
+    EXPECT_FALSE(tmp[len]);
+
+    // ASAN can check that we are not overwriting buffers, iff we make sure the
+    // buffer is exactly the size that we are expecting to be written. After
+    // running SafeSNPrintf() the first time, it is possible to compute the
+    // correct buffer size for this test. So, allocate a second buffer and run
+    // the exact same SafeSNPrintf() command again.
+    if (!perfect_buf.get()) {
+      out_sz = std::min(needed, sz);
+      out = new char[out_sz];
+      perfect_buf.reset(out);
+    } else {
+      break;
+    }
+  }
+
+  // All trailing bytes are unchanged.
+  for (size_t i = len+1; i < sz+2; ++i)
+    EXPECT_EQ('X', tmp[i]);
+
+  // The text that was generated by SafeSPrintf() should always match the
+  // equivalent text generated by sprintf(). Please note that the format
+  // string for sprintf() is not complicated, as it does not have the
+  // benefit of getting type information from the C++ compiler.
+  //
+  // N.B.: It would be so much cleaner to use snprintf(). But unfortunately,
+  //       Visual Studio doesn't support this function, and the work-arounds
+  //       are all really awkward.
+  char ref[256];
+  CHECK_LE(sz, sizeof(ref));
+  sprintf(ref, "A long string: %%d 00DEADBEEF %lld 0x%llX <NULL>",
+          static_cast<long long>(std::numeric_limits<intptr_t>::min()),
+          static_cast<unsigned long long>(
+            reinterpret_cast<uintptr_t>(PrintLongString)));
+  ref[sz-1] = '\000';
+
+#if defined(NDEBUG)
+  const size_t kSSizeMax = std::numeric_limits<ssize_t>::max();
+#else
+  const size_t kSSizeMax = internal::GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+  // Compare the output from SafeSPrintf() to the one from sprintf().
+  EXPECT_EQ(std::string(ref).substr(0, kSSizeMax-1), std::string(tmp.get()));
+
+  // We allocated a slightly larger buffer, so that we could perform some
+  // extra sanity checks. Now that the tests have all passed, we copy the
+  // data to the output buffer that the caller provided.
+  memcpy(buf, tmp.get(), len+1);
+}
+
+#if !defined(NDEBUG)
+class ScopedSafeSPrintfSSizeMaxSetter {
+ public:
+  ScopedSafeSPrintfSSizeMaxSetter(size_t sz) {
+    old_ssize_max_ = internal::GetSafeSPrintfSSizeMaxForTest();
+    internal::SetSafeSPrintfSSizeMaxForTest(sz);
+  }
+
+  ~ScopedSafeSPrintfSSizeMaxSetter() {
+    internal::SetSafeSPrintfSSizeMaxForTest(old_ssize_max_);
+  }
+
+ private:
+  size_t old_ssize_max_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSafeSPrintfSSizeMaxSetter);
+};
+#endif
+
+}  // anonymous namespace
+
+TEST(SafeSPrintfTest, Truncation) {
+  // We use PrintLongString() to print a complex long string and then
+  // truncate to all possible lengths. This ends up exercising a lot of
+  // different code paths in SafeSPrintf() and IToASCII(), as truncation can
+  // happen in a lot of different states.
+  char ref[256];
+  PrintLongString(ref, sizeof(ref));
+  for (size_t i = strlen(ref)+1; i; --i) {
+    char buf[sizeof(ref)];
+    PrintLongString(buf, i);
+    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+  }
+
+  // When compiling in debug mode, we have the ability to fake a small
+  // upper limit for the maximum value that can be stored in an ssize_t.
+  // SafeSPrintf() uses this upper limit to determine how many bytes it will
+  // write to the buffer, even if the caller claimed a bigger buffer size.
+  // Repeat the truncation test and verify that this other code path in
+  // SafeSPrintf() works correctly, too.
+#if !defined(NDEBUG)
+  for (size_t i = strlen(ref)+1; i > 1; --i) {
+    ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(i);
+    char buf[sizeof(ref)];
+    PrintLongString(buf, sizeof(buf));
+    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+  }
+
+  // kSSizeMax is also used to constrain the maximum amount of padding, before
+  // SafeSPrintf() detects an error in the format string.
+  ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(100);
+  char buf[256];
+  EXPECT_EQ(99, SafeSPrintf(buf, "%99c", ' '));
+  EXPECT_EQ(std::string(99, ' '), std::string(buf));
+  *buf = '\000';
+#if defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%100c", ' '), "padding <= max_padding");
+#endif
+  EXPECT_EQ(0, *buf);
+#endif
+}
+
+TEST(SafeSPrintfTest, Padding) {
+  char buf[40], fmt[40];
+
+  // Chars %c
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 'A'));
+  EXPECT_EQ("A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2c", 'A'));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02c", 'A'));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2c", 'A'));
+  EXPECT_EQ("%-2c", std::string(buf));
+  SafeSPrintf(fmt, "%%%dc", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1, SafeSPrintf(buf, fmt, 'A'));
+  SafeSPrintf(fmt, "%%%dc",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 'A'));
+  EXPECT_EQ("%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 'A'), "padding <= max_padding");
+#endif
+
+  // Octal %o
+  EXPECT_EQ(1, SafeSPrintf(buf, "%o", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2o", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02o", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(12, SafeSPrintf(buf, "%12o", -1));
+  EXPECT_EQ(" 37777777777", std::string(buf));
+  EXPECT_EQ(12, SafeSPrintf(buf, "%012o", -1));
+  EXPECT_EQ("037777777777", std::string(buf));
+  EXPECT_EQ(23, SafeSPrintf(buf, "%23o", -1LL));
+  EXPECT_EQ(" 1777777777777777777777", std::string(buf));
+  EXPECT_EQ(23, SafeSPrintf(buf, "%023o", -1LL));
+  EXPECT_EQ("01777777777777777777777", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2o", 0111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2o", 1));
+  EXPECT_EQ("%-2o", std::string(buf));
+  SafeSPrintf(fmt, "%%%do", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%do", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%do",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%o", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Decimals %d
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2d", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02d", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%3d", -1));
+  EXPECT_EQ(" -1", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%03d", -1));
+  EXPECT_EQ("-01", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2d", 111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%2d", -111));
+  EXPECT_EQ("-111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2d", 1));
+  EXPECT_EQ("%-2d", std::string(buf));
+  SafeSPrintf(fmt, "%%%dd", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dd", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%dd",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%d", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Hex %X
+  EXPECT_EQ(1, SafeSPrintf(buf, "%X", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2X", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02X", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%9X", -1));
+  EXPECT_EQ(" FFFFFFFF", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%09X", -1));
+  EXPECT_EQ("0FFFFFFFF", std::string(buf));
+  EXPECT_EQ(17, SafeSPrintf(buf, "%17X", -1LL));
+  EXPECT_EQ(" FFFFFFFFFFFFFFFF", std::string(buf));
+  EXPECT_EQ(17, SafeSPrintf(buf, "%017X", -1LL));
+  EXPECT_EQ("0FFFFFFFFFFFFFFFF", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2X", 0x111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2X", 1));
+  EXPECT_EQ("%-2X", std::string(buf));
+  SafeSPrintf(fmt, "%%%dX", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dX", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%dX",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%X", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Pointer %p
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", (void*)1));
+  EXPECT_EQ("0x1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%4p", (void*)1));
+  EXPECT_EQ(" 0x1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%04p", (void*)1));
+  EXPECT_EQ("0x01", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%4p", (void*)0x111));
+  EXPECT_EQ("0x111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2p", (void*)1));
+  EXPECT_EQ("%-2p", std::string(buf));
+  SafeSPrintf(fmt, "%%%dp", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, (void*)1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dp", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, (void*)1));
+  EXPECT_EQ("0x0", std::string(buf));
+  SafeSPrintf(fmt, "%%%dp",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%p", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // String
+  EXPECT_EQ(1, SafeSPrintf(buf, "%s", "A"));
+  EXPECT_EQ("A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2s", "A"));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02s", "A"));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2s", "AAA"));
+  EXPECT_EQ("AAA", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2s", "A"));
+  EXPECT_EQ("%-2s", std::string(buf));
+  SafeSPrintf(fmt, "%%%ds", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, "A"));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%ds", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, "A"));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%%ds",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, "A"));
+  EXPECT_EQ("%s", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, "A"), "padding <= max_padding");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmbeddedNul) {
+  char buf[] = { 'X', 'X', 'X', 'X' };
+  EXPECT_EQ(2, SafeSPrintf(buf, "%3c", 0));
+  EXPECT_EQ(' ', buf[0]);
+  EXPECT_EQ(' ', buf[1]);
+  EXPECT_EQ(0,   buf[2]);
+  EXPECT_EQ('X', buf[3]);
+
+  // Check handling of a NUL format character. N.B. this takes two different
+  // code paths depending on whether we are actually passing arguments. If
+  // we don't have any arguments, we are running in the fast-path code, that
+  // looks (almost) like a strncpy().
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ("%%", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+  EXPECT_EQ("%%", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmitNULL) {
+  char buf[40];
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion-null"
+#endif
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", NULL));
+  EXPECT_EQ("0", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", NULL));
+  EXPECT_EQ("0x0", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%s", NULL));
+  EXPECT_EQ("<NULL>", std::string(buf));
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+TEST(SafeSPrintfTest, PointerSize) {
+  // The internal data representation is a 64bit value, independent of the
+  // native word size. We want to perform sign-extension for signed integers,
+  // but we want to avoid doing so for pointer types. This could be a
+  // problem on systems, where pointers are only 32bit. This tests verifies
+  // that there is no such problem.
+  char *str = reinterpret_cast<char *>(0x80000000u);
+  void *ptr = str;
+  char buf[40];
+  EXPECT_EQ(10, SafeSPrintf(buf, "%p", str));
+  EXPECT_EQ("0x80000000", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%p", ptr));
+  EXPECT_EQ("0x80000000", std::string(buf));
+}
+
+}  // namespace strings
+}  // namespace base
diff --git a/base/strings/string16.h b/base/strings/string16.h
index e47669c..30f4e3e 100644
--- a/base/strings/string16.h
+++ b/base/strings/string16.h
@@ -29,6 +29,8 @@
 #include <stddef.h>
 #include <stdint.h>
 #include <stdio.h>
+
+#include <functional>
 #include <string>
 
 #include "base/base_export.h"
@@ -46,6 +48,8 @@
 
 #elif defined(WCHAR_T_IS_UTF32)
 
+#include <wchar.h>  // for mbstate_t
+
 namespace base {
 
 typedef uint16_t char16;
@@ -182,6 +186,21 @@
 extern template
 class BASE_EXPORT std::basic_string<base::char16, base::string16_char_traits>;
 
+// Specialize std::hash for base::string16. Although the style guide forbids
+// this in general, it is necessary for consistency with WCHAR_T_IS_UTF16
+// platforms, where base::string16 is a type alias for std::wstring.
+namespace std {
+template <>
+struct hash<base::string16> {
+  std::size_t operator()(const base::string16& s) const {
+    std::size_t result = 0;
+    for (base::char16 c : s)
+      result = (result * 131) + c;
+    return result;
+  }
+};
+}  // namespace std
+
 #endif  // WCHAR_T_IS_UTF32
 
 #endif  // BASE_STRINGS_STRING16_H_
diff --git a/base/strings/string16_unittest.cc b/base/strings/string16_unittest.cc
index 4e58218..0d2ca80 100644
--- a/base/strings/string16_unittest.cc
+++ b/base/strings/string16_unittest.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include <sstream>
+#include <unordered_set>
 
 #include "base/strings/string16.h"
 
@@ -11,8 +12,6 @@
 
 namespace base {
 
-#if defined(WCHAR_T_IS_UTF32)
-
 // We define a custom operator<< for string16 so we can use it with logging.
 // This tests that conversion.
 TEST(String16Test, OutputStream) {
@@ -53,6 +52,15 @@
   }
 }
 
-#endif
+TEST(String16Test, Hash) {
+  string16 str1 = ASCIIToUTF16("hello");
+  string16 str2 = ASCIIToUTF16("world");
+
+  std::unordered_set<string16> set;
+
+  set.insert(str1);
+  EXPECT_EQ(1u, set.count(str1));
+  EXPECT_EQ(0u, set.count(str2));
+}
 
 }  // namespace base
diff --git a/base/strings/string_number_conversions.cc b/base/strings/string_number_conversions.cc
index f869b41..09aeb44 100644
--- a/base/strings/string_number_conversions.cc
+++ b/base/strings/string_number_conversions.cc
@@ -12,9 +12,8 @@
 #include <limits>
 
 #include "base/logging.h"
-#include "base/numerics/safe_conversions.h"
 #include "base/numerics/safe_math.h"
-#include "base/strings/utf_string_conversions.h"
+#include "base/scoped_clear_errno.h"
 
 namespace base {
 
@@ -144,6 +144,7 @@
 
     if (begin != end && *begin == '-') {
       if (!std::numeric_limits<value_type>::is_signed) {
+        *output = 0;
         valid = false;
       } else if (!Negative::Invoke(begin + 1, end, output)) {
         valid = false;
diff --git a/base/strings/string_number_conversions.h b/base/strings/string_number_conversions.h
index 4a50284..a95544e 100644
--- a/base/strings/string_number_conversions.h
+++ b/base/strings/string_number_conversions.h
@@ -25,6 +25,14 @@
 // Please do not add "convenience" functions for converting strings to integers
 // that return the value and ignore success/failure. That encourages people to
 // write code that doesn't properly handle the error conditions.
+//
+// DO NOT use these functions in any UI unless it's NOT localized on purpose.
+// Instead, use base::MessageFormatter for a complex message with numbers
+// (integer, float, double) embedded or base::Format{Number,Double,Percent} to
+// just format a single number/percent. Note that some languages use native
+// digits instead of ASCII digits while others use a group separator or decimal
+// point different from ',' and '.'. Using these functions in the UI would lead
+// numbers to be formatted in a non-native way.
 // ----------------------------------------------------------------------------
 
 namespace base {
diff --git a/base/strings/string_number_conversions_unittest.cc b/base/strings/string_number_conversions_unittest.cc
index 0ed06a1..91191e0 100644
--- a/base/strings/string_number_conversions_unittest.cc
+++ b/base/strings/string_number_conversions_unittest.cc
@@ -13,6 +13,7 @@
 #include <cmath>
 #include <limits>
 
+#include "base/bit_cast.h"
 #include "base/format_macros.h"
 #include "base/macros.h"
 #include "base/strings/stringprintf.h"
@@ -137,12 +138,12 @@
   };
 
   for (size_t i = 0; i < arraysize(cases); ++i) {
-    int output = 0;
+    int output = cases[i].output ^ 1;  // Ensure StringToInt wrote something.
     EXPECT_EQ(cases[i].success, StringToInt(cases[i].input, &output));
     EXPECT_EQ(cases[i].output, output);
 
     string16 utf16_input = UTF8ToUTF16(cases[i].input);
-    output = 0;
+    output = cases[i].output ^ 1;  // Ensure StringToInt wrote something.
     EXPECT_EQ(cases[i].success, StringToInt(utf16_input, &output));
     EXPECT_EQ(cases[i].output, output);
   }
@@ -201,12 +202,13 @@
   };
 
   for (size_t i = 0; i < arraysize(cases); ++i) {
-    unsigned output = 0;
+    unsigned output =
+        cases[i].output ^ 1;  // Ensure StringToUint wrote something.
     EXPECT_EQ(cases[i].success, StringToUint(cases[i].input, &output));
     EXPECT_EQ(cases[i].output, output);
 
     string16 utf16_input = UTF8ToUTF16(cases[i].input);
-    output = 0;
+    output = cases[i].output ^ 1;  // Ensure StringToUint wrote something.
     EXPECT_EQ(cases[i].success, StringToUint(utf16_input, &output));
     EXPECT_EQ(cases[i].output, output);
   }
diff --git a/base/strings/string_piece.h b/base/strings/string_piece.h
index 31e7596..eaec14d 100644
--- a/base/strings/string_piece.h
+++ b/base/strings/string_piece.h
@@ -28,7 +28,6 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/containers/hash_tables.h"
 #include "base/logging.h"
 #include "base/strings/string16.h"
 
@@ -224,6 +223,8 @@
   }
 
   value_type operator[](size_type i) const { return ptr_[i]; }
+  value_type front() const { return ptr_[0]; }
+  value_type back() const { return ptr_[length_ - 1]; }
 
   void remove_prefix(size_type n) {
     ptr_ += n;
@@ -432,38 +433,32 @@
 BASE_EXPORT std::ostream& operator<<(std::ostream& o,
                                      const StringPiece& piece);
 
-}  // namespace base
-
 // Hashing ---------------------------------------------------------------------
 
 // We provide appropriate hash functions so StringPiece and StringPiece16 can
 // be used as keys in hash sets and maps.
 
-// This hash function is copied from base/containers/hash_tables.h. We don't
-// use the ones already defined for string and string16 directly because it
-// would require the string constructors to be called, which we don't want.
-#define HASH_STRING_PIECE(StringPieceType, string_piece)                \
-  std::size_t result = 0;                                               \
-  for (StringPieceType::const_iterator i = string_piece.begin();        \
-       i != string_piece.end(); ++i)                                    \
-    result = (result * 131) + *i;                                       \
-  return result;                                                        \
+// This hash function is copied from base/strings/string16.h. We don't use the
+// ones already defined for string and string16 directly because it would
+// require the string constructors to be called, which we don't want.
+#define HASH_STRING_PIECE(StringPieceType, string_piece)         \
+  std::size_t result = 0;                                        \
+  for (StringPieceType::const_iterator i = string_piece.begin(); \
+       i != string_piece.end(); ++i)                             \
+    result = (result * 131) + *i;                                \
+  return result;
 
-namespace BASE_HASH_NAMESPACE {
-
-template<>
-struct hash<base::StringPiece> {
-  std::size_t operator()(const base::StringPiece& sp) const {
-    HASH_STRING_PIECE(base::StringPiece, sp);
+struct StringPieceHash {
+  std::size_t operator()(const StringPiece& sp) const {
+    HASH_STRING_PIECE(StringPiece, sp);
   }
 };
-template<>
-struct hash<base::StringPiece16> {
-  std::size_t operator()(const base::StringPiece16& sp16) const {
-    HASH_STRING_PIECE(base::StringPiece16, sp16);
+struct StringPiece16Hash {
+  std::size_t operator()(const StringPiece16& sp16) const {
+    HASH_STRING_PIECE(StringPiece16, sp16);
   }
 };
 
-}  // namespace BASE_HASH_NAMESPACE
+}  // namespace base
 
 #endif  // BASE_STRINGS_STRING_PIECE_H_
diff --git a/base/strings/string_tokenizer_unittest.cc b/base/strings/string_tokenizer_unittest.cc
new file mode 100644
index 0000000..d391845
--- /dev/null
+++ b/base/strings/string_tokenizer_unittest.cc
@@ -0,0 +1,234 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_tokenizer.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+
+namespace base {
+
+namespace {
+
+TEST(StringTokenizerTest, Simple) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, Reset) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+
+  for (int i = 0; i < 2; ++i) {
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("this"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("is"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("a"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("test"), t.token());
+
+    EXPECT_FALSE(t.GetNext());
+    t.Reset();
+  }
+}
+
+TEST(StringTokenizerTest, RetDelims) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+  t.set_options(StringTokenizer::RETURN_DELIMS);
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ManyDelims) {
+  string input = "this: is, a-test";
+  StringTokenizer t(input, ": ,-");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseHeader) {
+  string input = "Content-Type: text/html ; charset=UTF-8";
+  StringTokenizer t(input, ": ;=");
+  t.set_options(StringTokenizer::RETURN_DELIMS);
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("Content-Type"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(":"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("text/html"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(";"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("charset"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string("="), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("UTF-8"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString) {
+  string input = "foo bar 'hello world' baz";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hello world'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("baz"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_Malformed) {
+  string input = "bar 'hello wo";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hello wo"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_Multiple) {
+  string input = "bar 'hel\"lo\" wo' baz\"";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'\"");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hel\"lo\" wo'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("baz\""), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_EscapedQuotes) {
+  string input = "foo 'don\\'t do that'";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'don\\'t do that'"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_EscapedQuotes2) {
+  string input = "foo='a, b', bar";
+  StringTokenizer t(input, ", ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo='a, b'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/strings/string_util.cc b/base/strings/string_util.cc
index e8000ab..cb668ed 100644
--- a/base/strings/string_util.cc
+++ b/base/strings/string_util.cc
@@ -23,7 +23,6 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/singleton.h"
-#include "base/strings/string_split.h"
 #include "base/strings/utf_string_conversion_utils.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/third_party/icu/icu_utf.h"
@@ -888,6 +887,7 @@
     const std::vector<OutStringType>& subst,
     std::vector<size_t>* offsets) {
   size_t substitutions = subst.size();
+  DCHECK_LT(substitutions, 10U);
 
   size_t sub_length = 0;
   for (const auto& cur : subst)
@@ -901,7 +901,6 @@
     if ('$' == *i) {
       if (i + 1 != format_string.end()) {
         ++i;
-        DCHECK('$' == *i || '1' <= *i) << "Invalid placeholder: " << *i;
         if ('$' == *i) {
           while (i != format_string.end() && '$' == *i) {
             formatted.push_back('$');
@@ -909,14 +908,11 @@
           }
           --i;
         } else {
-          uintptr_t index = 0;
-          while (i != format_string.end() && '0' <= *i && *i <= '9') {
-            index *= 10;
-            index += *i - '0';
-            ++i;
+          if (*i < '1' || *i > '9') {
+            DLOG(ERROR) << "Invalid placeholder: $" << *i;
+            continue;
           }
-          --i;
-          index -= 1;
+          uintptr_t index = *i - '1';
           if (offsets) {
             ReplacementOffset r_offset(index,
                 static_cast<int>(formatted.size()));
diff --git a/base/strings/string_util.h b/base/strings/string_util.h
index f1d708a..0ee077c 100644
--- a/base/strings/string_util.h
+++ b/base/strings/string_util.h
@@ -21,9 +21,11 @@
 #include "base/strings/string_piece.h"  // For implicit conversions.
 #include "build/build_config.h"
 
+#if defined(ANDROID)
 // On Android, bionic's stdio.h defines an snprintf macro when being built with
 // clang. Undefine it here so it won't collide with base::snprintf().
 #undef snprintf
+#endif  // defined(ANDROID)
 
 namespace base {
 
@@ -341,7 +343,15 @@
 }
 template <typename Char>
 inline bool IsAsciiAlpha(Char c) {
-  return ((c >= 'A') && (c <= 'Z')) || ((c >= 'a') && (c <= 'z'));
+  return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+}
+template <typename Char>
+inline bool IsAsciiUpper(Char c) {
+  return c >= 'A' && c <= 'Z';
+}
+template <typename Char>
+inline bool IsAsciiLower(Char c) {
+  return c >= 'a' && c <= 'z';
 }
 template <typename Char>
 inline bool IsAsciiDigit(Char c) {
@@ -433,7 +443,7 @@
 BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
                                 StringPiece16 separator);
 
-// Replace $1-$2-$3..$9 in the format string with |a|-|b|-|c|..|i| respectively.
+// Replace $1-$2-$3..$9 in the format string with values from |subst|.
 // Additionally, any number of consecutive '$' characters is replaced by that
 // number less one. Eg $$->$, $$$->$$, etc. The offsets parameter here can be
 // NULL. This only allows you to use up to nine replacements.
diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc
index 79eed61..df2226e 100644
--- a/base/strings/string_util_unittest.cc
+++ b/base/strings/string_util_unittest.cc
@@ -820,9 +820,9 @@
 
   string16 formatted =
       ReplaceStringPlaceholders(
-          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$1g,$2h,$3i"), subst, NULL);
+          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$1g,$2h,$3i"), subst, nullptr);
 
-  EXPECT_EQ(formatted, ASCIIToUTF16("9aa,8bb,7cc,d,e,f,9ag,8bh,7ci"));
+  EXPECT_EQ(ASCIIToUTF16("9aa,8bb,7cc,d,e,f,9ag,8bh,7ci"), formatted);
 }
 
 TEST(StringUtilTest, ReplaceStringPlaceholders) {
@@ -839,35 +839,25 @@
 
   string16 formatted =
       ReplaceStringPlaceholders(
-          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i"), subst, NULL);
+          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i"), subst, nullptr);
 
-  EXPECT_EQ(formatted, ASCIIToUTF16("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii"));
+  EXPECT_EQ(ASCIIToUTF16("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii"), formatted);
 }
 
-TEST(StringUtilTest, ReplaceStringPlaceholdersMoreThan9Replacements) {
+TEST(StringUtilTest, ReplaceStringPlaceholdersOneDigit) {
   std::vector<string16> subst;
-  subst.push_back(ASCIIToUTF16("9a"));
-  subst.push_back(ASCIIToUTF16("8b"));
-  subst.push_back(ASCIIToUTF16("7c"));
-  subst.push_back(ASCIIToUTF16("6d"));
-  subst.push_back(ASCIIToUTF16("5e"));
-  subst.push_back(ASCIIToUTF16("4f"));
-  subst.push_back(ASCIIToUTF16("3g"));
-  subst.push_back(ASCIIToUTF16("2h"));
-  subst.push_back(ASCIIToUTF16("1i"));
-  subst.push_back(ASCIIToUTF16("0j"));
-  subst.push_back(ASCIIToUTF16("-1k"));
-  subst.push_back(ASCIIToUTF16("-2l"));
-  subst.push_back(ASCIIToUTF16("-3m"));
-  subst.push_back(ASCIIToUTF16("-4n"));
-
+  subst.push_back(ASCIIToUTF16("1a"));
   string16 formatted =
-      ReplaceStringPlaceholders(
-          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i,"
-                       "$10j,$11k,$12l,$13m,$14n,$1"), subst, NULL);
+      ReplaceStringPlaceholders(ASCIIToUTF16(" $16 "), subst, nullptr);
+  EXPECT_EQ(ASCIIToUTF16(" 1a6 "), formatted);
+}
 
-  EXPECT_EQ(formatted, ASCIIToUTF16("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,"
-                                    "1ii,0jj,-1kk,-2ll,-3mm,-4nn,9a"));
+TEST(StringUtilTest, ReplaceStringPlaceholdersInvalidPlaceholder) {
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("1a"));
+  string16 formatted =
+      ReplaceStringPlaceholders(ASCIIToUTF16("+$-+$A+$1+"), subst, nullptr);
+  EXPECT_EQ(ASCIIToUTF16("+++1a+"), formatted);
 }
 
 TEST(StringUtilTest, StdStringReplaceStringPlaceholders) {
@@ -884,9 +874,9 @@
 
   std::string formatted =
       ReplaceStringPlaceholders(
-          "$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i", subst, NULL);
+          "$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i", subst, nullptr);
 
-  EXPECT_EQ(formatted, "9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii");
+  EXPECT_EQ("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii", formatted);
 }
 
 TEST(StringUtilTest, ReplaceStringPlaceholdersConsecutiveDollarSigns) {
@@ -894,7 +884,7 @@
   subst.push_back("a");
   subst.push_back("b");
   subst.push_back("c");
-  EXPECT_EQ(ReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst, NULL),
+  EXPECT_EQ(ReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst, nullptr),
             "$1 $$2 $$$3");
 }
 
diff --git a/base/strings/stringize_macros_unittest.cc b/base/strings/stringize_macros_unittest.cc
new file mode 100644
index 0000000..d7f9e56
--- /dev/null
+++ b/base/strings/stringize_macros_unittest.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringize_macros.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Macros as per documentation in header file.
+#define PREPROCESSOR_UTIL_UNITTEST_A FOO
+#define PREPROCESSOR_UTIL_UNITTEST_B(x) myobj->FunctionCall(x)
+#define PREPROCESSOR_UTIL_UNITTEST_C "foo"
+
+TEST(StringizeTest, Ansi) {
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_A",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_A));
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_B(y)",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_C",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_C));
+
+  EXPECT_STREQ("FOO", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_A));
+  EXPECT_STREQ("myobj->FunctionCall(y)",
+               STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+  EXPECT_STREQ("\"foo\"", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_C));
+}
diff --git a/base/strings/utf_string_conversion_utils.cc b/base/strings/utf_string_conversion_utils.cc
index 807e22d..22058a5 100644
--- a/base/strings/utf_string_conversion_utils.cc
+++ b/base/strings/utf_string_conversion_utils.cc
@@ -55,7 +55,7 @@
 
 #if defined(WCHAR_T_IS_UTF32)
 bool ReadUnicodeCharacter(const wchar_t* src,
-                          int32_t /* src_len */,
+                          int32_t /*src_len*/,
                           int32_t* char_index,
                           uint32_t* code_point) {
   // Conversion is easy since the source is 32-bit.
diff --git a/base/sync_socket_posix.cc b/base/sync_socket_posix.cc
index 34fa5cd..995c8e9 100644
--- a/base/sync_socket_posix.cc
+++ b/base/sync_socket_posix.cc
@@ -105,9 +105,8 @@
   return descriptor.fd;
 }
 
-bool SyncSocket::PrepareTransitDescriptor(
-    ProcessHandle /* peer_process_handle */,
-    TransitDescriptor* descriptor) {
+bool SyncSocket::PrepareTransitDescriptor(ProcessHandle /*peer_process_handle*/,
+                                          TransitDescriptor* descriptor) {
   descriptor->fd = handle();
   descriptor->auto_close = false;
   return descriptor->fd != kInvalidHandle;
@@ -223,7 +222,7 @@
   DCHECK_LE(length, kMaxMessageLength);
   DCHECK_NE(handle_, kInvalidHandle);
 
-  const long flags = fcntl(handle_, F_GETFL, NULL);
+  const int flags = fcntl(handle_, F_GETFL);
   if (flags != -1 && (flags & O_NONBLOCK) == 0) {
     // Set the socket to non-blocking mode for sending if its original mode
     // is blocking.
diff --git a/base/synchronization/condition_variable.h b/base/synchronization/condition_variable.h
index a41b2ba..ebf90d2 100644
--- a/base/synchronization/condition_variable.h
+++ b/base/synchronization/condition_variable.h
@@ -75,9 +75,12 @@
 #include <pthread.h>
 #endif
 
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
 namespace base {
 
-class ConditionVarImpl;
 class TimeDelta;
 
 class BASE_EXPORT ConditionVariable {
@@ -100,14 +103,15 @@
  private:
 
 #if defined(OS_WIN)
-  ConditionVarImpl* impl_;
+  CONDITION_VARIABLE cv_;
+  SRWLOCK* const srwlock_;
 #elif defined(OS_POSIX)
   pthread_cond_t condition_;
   pthread_mutex_t* user_mutex_;
-#if DCHECK_IS_ON()
-  base::Lock* user_lock_;     // Needed to adjust shadow lock state on wait.
 #endif
 
+#if DCHECK_IS_ON() && (defined(OS_WIN) || defined(OS_POSIX))
+  base::Lock* const user_lock_;  // Needed to adjust shadow lock state on wait.
 #endif
 
   DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
diff --git a/base/synchronization/condition_variable_unittest.cc b/base/synchronization/condition_variable_unittest.cc
index 4503922..d60b2b8 100644
--- a/base/synchronization/condition_variable_unittest.cc
+++ b/base/synchronization/condition_variable_unittest.cc
@@ -4,16 +4,18 @@
 
 // Multi-threaded tests of ConditionVariable class.
 
+#include "base/synchronization/condition_variable.h"
+
 #include <time.h>
+
 #include <algorithm>
+#include <memory>
 #include <vector>
 
 #include "base/bind.h"
 #include "base/location.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/single_thread_task_runner.h"
-#include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
 #include "base/synchronization/spin_wait.h"
 #include "base/threading/platform_thread.h"
@@ -133,7 +135,7 @@
 
   const int thread_count_;
   int waiting_thread_count_;
-  scoped_ptr<PlatformThreadHandle[]> thread_handles_;
+  std::unique_ptr<PlatformThreadHandle[]> thread_handles_;
   std::vector<int> assignment_history_;  // Number of assignment per worker.
   std::vector<int> completion_history_;  // Number of completions per worker.
   int thread_started_counter_;  // Used to issue unique id to workers.
diff --git a/base/synchronization/lock.h b/base/synchronization/lock.h
index f7dd35d..fbf6cef 100644
--- a/base/synchronization/lock.h
+++ b/base/synchronization/lock.h
@@ -38,9 +38,9 @@
   Lock();
   ~Lock();
 
-  // NOTE: Although windows critical sections support recursive locks, we do not
-  // allow this, and we will commonly fire a DCHECK() if a thread attempts to
-  // acquire the lock a second time (while already holding it).
+  // NOTE: We do not permit recursive locks and will commonly fire a DCHECK() if
+  // a thread attempts to acquire the lock a second time (while already holding
+  // it).
   void Acquire() {
     lock_.Lock();
     CheckUnheldAndMark();
@@ -61,15 +61,11 @@
   void AssertAcquired() const;
 #endif  // DCHECK_IS_ON()
 
-#if defined(OS_POSIX)
-  // The posix implementation of ConditionVariable needs to be able
-  // to see our lock and tweak our debugging counters, as it releases
-  // and acquires locks inside of pthread_cond_{timed,}wait.
+#if defined(OS_POSIX) || defined(OS_WIN)
+  // Both Windows and POSIX implementations of ConditionVariable need to be
+  // able to see our lock and tweak our debugging counters, as they release and
+  // acquire locks inside of their condition variable APIs.
   friend class ConditionVariable;
-#elif defined(OS_WIN)
-  // The Windows Vista implementation of ConditionVariable needs the
-  // native handle of the critical section.
-  friend class WinVistaCondVar;
 #endif
 
  private:
diff --git a/base/synchronization/lock_impl.h b/base/synchronization/lock_impl.h
index ed85987..cbaabc7 100644
--- a/base/synchronization/lock_impl.h
+++ b/base/synchronization/lock_impl.h
@@ -24,9 +24,9 @@
 class BASE_EXPORT LockImpl {
  public:
 #if defined(OS_WIN)
-  typedef CRITICAL_SECTION NativeHandle;
+  using NativeHandle = SRWLOCK;
 #elif defined(OS_POSIX)
-  typedef pthread_mutex_t NativeHandle;
+  using NativeHandle =  pthread_mutex_t;
 #endif
 
   LockImpl();
diff --git a/base/synchronization/read_write_lock.h b/base/synchronization/read_write_lock.h
new file mode 100644
index 0000000..4c59b7b
--- /dev/null
+++ b/base/synchronization/read_write_lock.h
@@ -0,0 +1,105 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
+#define BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "build/build_config.h"
+
+#if defined(OS_NACL)
+#include "base/synchronization/lock.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#else
+#  error No reader-writer lock defined for this platform.
+#endif
+
+namespace base {
+namespace subtle {
+
+// An OS-independent wrapper around reader-writer locks. There's no magic here.
+//
+// You are strongly encouraged to use base::Lock instead of this, unless you
+// can demonstrate contention and show that this would lead to an improvement.
+// This lock does not make any guarantees of fairness, which can lead to writer
+// starvation under certain access patterns. You should carefully consider your
+// writer access patterns before using this lock.
+class BASE_EXPORT ReadWriteLock {
+ public:
+  ReadWriteLock();
+  ~ReadWriteLock();
+
+  // Reader lock functions.
+  void ReadAcquire();
+  void ReadRelease();
+
+  // Writer lock functions.
+  void WriteAcquire();
+  void WriteRelease();
+
+ private:
+#if defined(OS_WIN)
+  using NativeHandle = SRWLOCK;
+#elif defined(OS_NACL)
+  using NativeHandle = Lock;
+#elif defined(OS_POSIX)
+  using NativeHandle = pthread_rwlock_t;
+#endif
+
+  NativeHandle native_handle_;
+
+#if defined(OS_NACL)
+  // Even though NaCl has a pthread_rwlock implementation, the build rules don't
+  // make it universally available. So instead, implement a slower and trivial
+  // reader-writer lock using a regular mutex.
+  // TODO(amistry): Remove this and use the posix implementation when it's
+  // available in all build configurations.
+  uint32_t readers_ = 0;
+  // base::Lock does checking to ensure the lock is acquired and released on the
+  // same thread. This is not the case for this lock, so use pthread mutexes
+  // directly here.
+  pthread_mutex_t writer_lock_ = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(ReadWriteLock);
+};
+
+class AutoReadLock {
+ public:
+  explicit AutoReadLock(ReadWriteLock& lock) : lock_(lock) {
+    lock_.ReadAcquire();
+  }
+  ~AutoReadLock() {
+    lock_.ReadRelease();
+  }
+
+ private:
+  ReadWriteLock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoReadLock);
+};
+
+class AutoWriteLock {
+ public:
+  explicit AutoWriteLock(ReadWriteLock& lock) : lock_(lock) {
+    lock_.WriteAcquire();
+  }
+  ~AutoWriteLock() {
+    lock_.WriteRelease();
+  }
+
+ private:
+  ReadWriteLock& lock_;
+  DISALLOW_COPY_AND_ASSIGN(AutoWriteLock);
+};
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // BASE_SYNCHRONIZATION_READ_WRITE_LOCK_H_
diff --git a/base/synchronization/read_write_lock_posix.cc b/base/synchronization/read_write_lock_posix.cc
new file mode 100644
index 0000000..e5de091
--- /dev/null
+++ b/base/synchronization/read_write_lock_posix.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/read_write_lock.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace subtle {
+
+ReadWriteLock::ReadWriteLock() : native_handle_(PTHREAD_RWLOCK_INITIALIZER) {}
+
+ReadWriteLock::~ReadWriteLock() {
+  int result = pthread_rwlock_destroy(&native_handle_);
+  DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::ReadAcquire() {
+  int result = pthread_rwlock_rdlock(&native_handle_);
+  DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::ReadRelease() {
+  int result = pthread_rwlock_unlock(&native_handle_);
+  DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::WriteAcquire() {
+  int result = pthread_rwlock_wrlock(&native_handle_);
+  DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+void ReadWriteLock::WriteRelease() {
+  int result = pthread_rwlock_unlock(&native_handle_);
+  DCHECK_EQ(result, 0) << ". " << strerror(result);
+}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/base/synchronization/waitable_event.h b/base/synchronization/waitable_event.h
index b5d91d0..3863e98 100644
--- a/base/synchronization/waitable_event.h
+++ b/base/synchronization/waitable_event.h
@@ -43,11 +43,18 @@
 // be better off just using an Windows event directly.
 class BASE_EXPORT WaitableEvent {
  public:
-  // If manual_reset is true, then to set the event state to non-signaled, a
-  // consumer must call the Reset method.  If this parameter is false, then the
-  // system automatically resets the event state to non-signaled after a single
-  // waiting thread has been released.
-  WaitableEvent(bool manual_reset, bool initially_signaled);
+  // Indicates whether a WaitableEvent should automatically reset the event
+  // state after a single waiting thread has been released or remain signaled
+  // until Reset() is manually invoked.
+  enum class ResetPolicy { MANUAL, AUTOMATIC };
+
+  // Indicates whether a new WaitableEvent should start in a signaled state or
+  // not.
+  enum class InitialState { SIGNALED, NOT_SIGNALED };
+
+  // Constructs a WaitableEvent with policy and initial state as detailed in
+  // the above enums.
+  WaitableEvent(ResetPolicy reset_policy, InitialState initial_state);
 
 #if defined(OS_WIN)
   // Create a WaitableEvent from an Event HANDLE which has already been
@@ -150,7 +157,7 @@
   struct WaitableEventKernel :
       public RefCountedThreadSafe<WaitableEventKernel> {
    public:
-    WaitableEventKernel(bool manual_reset, bool initially_signaled);
+    WaitableEventKernel(ResetPolicy reset_policy, InitialState initial_state);
 
     bool Dequeue(Waiter* waiter, void* tag);
 
diff --git a/base/synchronization/waitable_event_posix.cc b/base/synchronization/waitable_event_posix.cc
index 64d4376..b32c882 100644
--- a/base/synchronization/waitable_event_posix.cc
+++ b/base/synchronization/waitable_event_posix.cc
@@ -39,12 +39,11 @@
 // -----------------------------------------------------------------------------
 // This is just an abstract base class for waking the two types of waiters
 // -----------------------------------------------------------------------------
-WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled)
-    : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) {
-}
+WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
+                             InitialState initial_state)
+    : kernel_(new WaitableEventKernel(reset_policy, initial_state)) {}
 
-WaitableEvent::~WaitableEvent() {
-}
+WaitableEvent::~WaitableEvent() = default;
 
 void WaitableEvent::Reset() {
   base::AutoLock locked(kernel_->lock_);
@@ -348,14 +347,13 @@
 // -----------------------------------------------------------------------------
 // Private functions...
 
-WaitableEvent::WaitableEventKernel::WaitableEventKernel(bool manual_reset,
-                                                        bool initially_signaled)
-    : manual_reset_(manual_reset),
-      signaled_(initially_signaled) {
-}
+WaitableEvent::WaitableEventKernel::WaitableEventKernel(
+    ResetPolicy reset_policy,
+    InitialState initial_state)
+    : manual_reset_(reset_policy == ResetPolicy::MANUAL),
+      signaled_(initial_state == InitialState::SIGNALED) {}
 
-WaitableEvent::WaitableEventKernel::~WaitableEventKernel() {
-}
+WaitableEvent::WaitableEventKernel::~WaitableEventKernel() = default;
 
 // -----------------------------------------------------------------------------
 // Wake all waiting waiters. Called with lock held.
diff --git a/base/synchronization/waitable_event_unittest.cc b/base/synchronization/waitable_event_unittest.cc
index 2930409..ac5c9f1 100644
--- a/base/synchronization/waitable_event_unittest.cc
+++ b/base/synchronization/waitable_event_unittest.cc
@@ -15,7 +15,8 @@
 namespace base {
 
 TEST(WaitableEventTest, ManualBasics) {
-  WaitableEvent event(true, false);
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
 
   EXPECT_FALSE(event.IsSignaled());
 
@@ -33,7 +34,8 @@
 }
 
 TEST(WaitableEventTest, AutoBasics) {
-  WaitableEvent event(false, false);
+  WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
 
   EXPECT_FALSE(event.IsSignaled());
 
@@ -55,8 +57,10 @@
 
 TEST(WaitableEventTest, WaitManyShortcut) {
   WaitableEvent* ev[5];
-  for (unsigned i = 0; i < 5; ++i)
-    ev[i] = new WaitableEvent(false, false);
+  for (unsigned i = 0; i < 5; ++i) {
+    ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  }
 
   ev[3]->Signal();
   EXPECT_EQ(WaitableEvent::WaitMany(ev, 5), 3u);
@@ -94,7 +98,9 @@
 // Tests that a WaitableEvent can be safely deleted when |Wait| is done without
 // additional synchronization.
 TEST(WaitableEventTest, WaitAndDelete) {
-  WaitableEvent* ev = new WaitableEvent(false, false);
+  WaitableEvent* ev =
+      new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
 
   WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev);
   PlatformThreadHandle thread;
@@ -110,8 +116,10 @@
 // without additional synchronization.
 TEST(WaitableEventTest, WaitMany) {
   WaitableEvent* ev[5];
-  for (unsigned i = 0; i < 5; ++i)
-    ev[i] = new WaitableEvent(false, false);
+  for (unsigned i = 0; i < 5; ++i) {
+    ev[i] = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                              WaitableEvent::InitialState::NOT_SIGNALED);
+  }
 
   WaitableEventSignaler signaler(TimeDelta::FromMilliseconds(10), ev[2]);
   PlatformThreadHandle thread;
@@ -135,7 +143,9 @@
 #define MAYBE_TimedWait TimedWait
 #endif
 TEST(WaitableEventTest, MAYBE_TimedWait) {
-  WaitableEvent* ev = new WaitableEvent(false, false);
+  WaitableEvent* ev =
+      new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                        WaitableEvent::InitialState::NOT_SIGNALED);
 
   TimeDelta thread_delay = TimeDelta::FromMilliseconds(10);
   WaitableEventSignaler signaler(thread_delay, ev);
diff --git a/base/synchronization/waitable_event_watcher_posix.cc b/base/synchronization/waitable_event_watcher_posix.cc
index aa425f2..7cf8688 100644
--- a/base/synchronization/waitable_event_watcher_posix.cc
+++ b/base/synchronization/waitable_event_watcher_posix.cc
@@ -145,8 +145,8 @@
 
   cancel_flag_ = new Flag;
   callback_ = callback;
-  internal_callback_ =
-      base::Bind(&AsyncCallbackHelper, cancel_flag_, callback_, event);
+  internal_callback_ = base::Bind(
+      &AsyncCallbackHelper, base::RetainedRef(cancel_flag_), callback_, event);
   WaitableEvent::WaitableEventKernel* kernel = event->kernel_.get();
 
   AutoLock locked(kernel->lock_);
diff --git a/base/sys_byteorder.h b/base/sys_byteorder.h
index ddb3f5b..8d9066c 100644
--- a/base/sys_byteorder.h
+++ b/base/sys_byteorder.h
@@ -15,27 +15,35 @@
 
 #include "build/build_config.h"
 
+#if defined(COMPILER_MSVC)
+#include <stdlib.h>
+#endif
+
 namespace base {
 
 // Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
 inline uint16_t ByteSwap(uint16_t x) {
-  return ((x & 0x00ff) << 8) | ((x & 0xff00) >> 8);
+#if defined(COMPILER_MSVC)
+  return _byteswap_ushort(x);
+#else
+  return __builtin_bswap16(x);
+#endif
 }
 
 inline uint32_t ByteSwap(uint32_t x) {
-  return ((x & 0x000000fful) << 24) | ((x & 0x0000ff00ul) << 8) |
-      ((x & 0x00ff0000ul) >> 8) | ((x & 0xff000000ul) >> 24);
+#if defined(COMPILER_MSVC)
+  return _byteswap_ulong(x);
+#else
+  return __builtin_bswap32(x);
+#endif
 }
 
 inline uint64_t ByteSwap(uint64_t x) {
-  return ((x & 0x00000000000000ffull) << 56) |
-      ((x & 0x000000000000ff00ull) << 40) |
-      ((x & 0x0000000000ff0000ull) << 24) |
-      ((x & 0x00000000ff000000ull) << 8) |
-      ((x & 0x000000ff00000000ull) >> 8) |
-      ((x & 0x0000ff0000000000ull) >> 24) |
-      ((x & 0x00ff000000000000ull) >> 40) |
-      ((x & 0xff00000000000000ull) >> 56);
+#if defined(COMPILER_MSVC)
+  return _byteswap_uint64(x);
+#else
+  return __builtin_bswap64(x);
+#endif
 }
 
 // Converts the bytes in |x| from host order (endianness) to little endian, and
diff --git a/base/sys_info.cc b/base/sys_info.cc
index cebb363..5aac9b7 100644
--- a/base/sys_info.cc
+++ b/base/sys_info.cc
@@ -28,7 +28,7 @@
     return false;
 
   int ram_size_mb = SysInfo::AmountOfPhysicalMemoryMB();
-  return (ram_size_mb > 0 && ram_size_mb < kLowMemoryDeviceThresholdMB);
+  return (ram_size_mb > 0 && ram_size_mb <= kLowMemoryDeviceThresholdMB);
 }
 
 static LazyInstance<
diff --git a/base/sys_info.h b/base/sys_info.h
index 5686dcb..b107477 100644
--- a/base/sys_info.h
+++ b/base/sys_info.h
@@ -50,6 +50,10 @@
   // or -1 on failure.
   static int64_t AmountOfFreeDiskSpace(const FilePath& path);
 
+  // Return the total disk space in bytes on the volume containing |path|, or -1
+  // on failure.
+  static int64_t AmountOfTotalDiskSpace(const FilePath& path);
+
   // Returns system uptime.
   static TimeDelta Uptime();
 
@@ -93,12 +97,6 @@
   // allocate.
   static size_t VMAllocationGranularity();
 
-#if defined(OS_POSIX) && !defined(OS_MACOSX)
-  // Returns the maximum SysV shared memory segment size, or zero if there is no
-  // limit.
-  static uint64_t MaxSharedMemorySize();
-#endif  // defined(OS_POSIX) && !defined(OS_MACOSX)
-
 #if defined(OS_CHROMEOS)
   typedef std::map<std::string, std::string> LsbReleaseMap;
 
diff --git a/base/sys_info_chromeos.cc b/base/sys_info_chromeos.cc
index e35bd0a..3794ed9 100644
--- a/base/sys_info_chromeos.cc
+++ b/base/sys_info_chromeos.cc
@@ -60,7 +60,7 @@
     is_running_on_chromeos_ = false;
 
     std::string lsb_release, lsb_release_time_str;
-    scoped_ptr<Environment> env(Environment::Create());
+    std::unique_ptr<Environment> env(Environment::Create());
     bool parsed_from_env =
         env->GetVar(kLsbReleaseKey, &lsb_release) &&
         env->GetVar(kLsbReleaseTimeKey, &lsb_release_time_str);
@@ -212,7 +212,7 @@
 // static
 void SysInfo::SetChromeOSVersionInfoForTest(const std::string& lsb_release,
                                             const Time& lsb_release_time) {
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
   env->SetVar(kLsbReleaseKey, lsb_release);
   env->SetVar(kLsbReleaseTimeKey,
               DoubleToString(lsb_release_time.ToDoubleT()));
diff --git a/base/sys_info_linux.cc b/base/sys_info_linux.cc
index 8e1f533..298d245 100644
--- a/base/sys_info_linux.cc
+++ b/base/sys_info_linux.cc
@@ -33,28 +33,9 @@
   return AmountOfMemory(_SC_PHYS_PAGES);
 }
 
-uint64_t MaxSharedMemorySize() {
-  std::string contents;
-  base::ReadFileToString(base::FilePath("/proc/sys/kernel/shmmax"), &contents);
-  DCHECK(!contents.empty());
-  if (!contents.empty() && contents[contents.length() - 1] == '\n') {
-    contents.erase(contents.length() - 1);
-  }
-
-  uint64_t limit;
-  if (!base::StringToUint64(contents, &limit)) {
-    limit = 0;
-  }
-  DCHECK_GT(limit, 0u);
-  return limit;
-}
-
 base::LazyInstance<
     base::internal::LazySysInfoValue<int64_t, AmountOfPhysicalMemory>>::Leaky
     g_lazy_physical_memory = LAZY_INSTANCE_INITIALIZER;
-base::LazyInstance<
-    base::internal::LazySysInfoValue<uint64_t, MaxSharedMemorySize>>::Leaky
-    g_lazy_max_shared_memory = LAZY_INSTANCE_INITIALIZER;
 
 }  // namespace
 
@@ -71,11 +52,6 @@
 }
 
 // static
-uint64_t SysInfo::MaxSharedMemorySize() {
-  return g_lazy_max_shared_memory.Get().value();
-}
-
-// static
 std::string SysInfo::CPUModelName() {
 #if defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL)
   const char kCpuModelPrefix[] = "Hardware";
diff --git a/base/sys_info_mac.cc b/base/sys_info_mac.mm
similarity index 61%
rename from base/sys_info_mac.cc
rename to base/sys_info_mac.mm
index ff1ec5c..102d99f 100644
--- a/base/sys_info_mac.cc
+++ b/base/sys_info_mac.mm
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -6,6 +6,7 @@
 
 #include <ApplicationServices/ApplicationServices.h>
 #include <CoreServices/CoreServices.h>
+#import <Foundation/Foundation.h>
 #include <mach/mach_host.h>
 #include <mach/mach_init.h>
 #include <stddef.h>
@@ -14,7 +15,9 @@
 #include <sys/types.h>
 
 #include "base/logging.h"
+#include "base/mac/mac_util.h"
 #include "base/mac/scoped_mach_port.h"
+#import "base/mac/sdk_forward_declarations.h"
 #include "base/macros.h"
 #include "base/strings/stringprintf.h"
 
@@ -36,12 +39,36 @@
 void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
                                             int32_t* minor_version,
                                             int32_t* bugfix_version) {
-  Gestalt(gestaltSystemVersionMajor,
-      reinterpret_cast<SInt32*>(major_version));
-  Gestalt(gestaltSystemVersionMinor,
-      reinterpret_cast<SInt32*>(minor_version));
-  Gestalt(gestaltSystemVersionBugFix,
-      reinterpret_cast<SInt32*>(bugfix_version));
+#if defined(MAC_OS_X_VERSION_10_10)
+  NSProcessInfo* processInfo = [NSProcessInfo processInfo];
+  if ([processInfo respondsToSelector:@selector(operatingSystemVersion)]) {
+    NSOperatingSystemVersion version = [processInfo operatingSystemVersion];
+    *major_version = version.majorVersion;
+    *minor_version = version.minorVersion;
+    *bugfix_version = version.patchVersion;
+  } else {
+#else
+  // Android buildbots are too old and have trouble using the forward
+  // declarations for some reason. Conditionally-compile the above block
+  // only when building on a more modern version of OS X.
+  if (true) {
+#endif
+    // -[NSProcessInfo operatingSystemVersion] is documented available in 10.10.
+    // It's also available via a private API since 10.9.2. For the remaining
+    // cases in 10.9, rely on ::Gestalt(..). Since this code is only needed for
+    // 10.9.0 and 10.9.1 and uses the recommended replacement thereafter,
+    // suppress the warning for this fallback case.
+    DCHECK(base::mac::IsOSMavericks());
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+    Gestalt(gestaltSystemVersionMajor,
+            reinterpret_cast<SInt32*>(major_version));
+    Gestalt(gestaltSystemVersionMinor,
+            reinterpret_cast<SInt32*>(minor_version));
+    Gestalt(gestaltSystemVersionBugFix,
+            reinterpret_cast<SInt32*>(bugfix_version));
+#pragma clang diagnostic pop
+  }
 }
 
 // static
diff --git a/base/sys_info_posix.cc b/base/sys_info_posix.cc
index 85ae039..5d1c450 100644
--- a/base/sys_info_posix.cc
+++ b/base/sys_info_posix.cc
@@ -73,6 +73,20 @@
     base::internal::LazySysInfoValue<int64_t, AmountOfVirtualMemory>>::Leaky
     g_lazy_virtual_memory = LAZY_INSTANCE_INITIALIZER;
 
+bool GetDiskSpaceInfo(const base::FilePath& path,
+                      int64_t* available_bytes,
+                      int64_t* total_bytes) {
+  struct statvfs stats;
+  if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
+    return false;
+
+  if (available_bytes)
+    *available_bytes = static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+  if (total_bytes)
+    *total_bytes = static_cast<int64_t>(stats.f_blocks) * stats.f_frsize;
+  return true;
+}
+
 }  // namespace
 
 namespace base {
@@ -92,10 +106,20 @@
 int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
   base::ThreadRestrictions::AssertIOAllowed();
 
-  struct statvfs stats;
-  if (HANDLE_EINTR(statvfs(path.value().c_str(), &stats)) != 0)
+  int64_t available;
+  if (!GetDiskSpaceInfo(path, &available, nullptr))
     return -1;
-  return static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+  return available;
+}
+
+// static
+int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
+  base::ThreadRestrictions::AssertIOAllowed();
+
+  int64_t total;
+  if (!GetDiskSpaceInfo(path, nullptr, &total))
+    return -1;
+  return total;
 }
 
 #if !defined(OS_MACOSX) && !defined(OS_ANDROID)
diff --git a/base/sys_info_unittest.cc b/base/sys_info_unittest.cc
index 3f284ba..0231df6 100644
--- a/base/sys_info_unittest.cc
+++ b/base/sys_info_unittest.cc
@@ -16,13 +16,6 @@
 typedef PlatformTest SysInfoTest;
 using base::FilePath;
 
-#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_ANDROID)
-TEST_F(SysInfoTest, MaxSharedMemorySize) {
-  // We aren't actually testing that it's correct, just that it's sane.
-  EXPECT_GT(base::SysInfo::MaxSharedMemorySize(), 0u);
-}
-#endif
-
 TEST_F(SysInfoTest, NumProcs) {
   // We aren't actually testing that it's correct, just that it's sane.
   EXPECT_GE(base::SysInfo::NumberOfProcessors(), 1);
@@ -40,7 +33,15 @@
   // We aren't actually testing that it's correct, just that it's sane.
   FilePath tmp_path;
   ASSERT_TRUE(base::GetTempDir(&tmp_path));
-  EXPECT_GT(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
+  EXPECT_GE(base::SysInfo::AmountOfFreeDiskSpace(tmp_path), 0)
+            << tmp_path.value();
+}
+
+TEST_F(SysInfoTest, AmountOfTotalDiskSpace) {
+  // We aren't actually testing that it's correct, just that it's sane.
+  FilePath tmp_path;
+  ASSERT_TRUE(base::GetTempDir(&tmp_path));
+  EXPECT_GT(base::SysInfo::AmountOfTotalDiskSpace(tmp_path), 0)
             << tmp_path.value();
 }
 
diff --git a/base/task/cancelable_task_tracker.cc b/base/task/cancelable_task_tracker.cc
index 375ff8b..6f39410 100644
--- a/base/task/cancelable_task_tracker.cc
+++ b/base/task/cancelable_task_tracker.cc
@@ -16,7 +16,7 @@
 #include "base/single_thread_task_runner.h"
 #include "base/synchronization/cancellation_flag.h"
 #include "base/task_runner.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 using base::Bind;
 using base::CancellationFlag;
@@ -39,7 +39,7 @@
 }
 
 bool IsCanceled(const CancellationFlag* flag,
-                base::ScopedClosureRunner* /* cleanup_runner */) {
+                base::ScopedClosureRunner* /*cleanup_runner*/) {
   return flag->IsSet();
 }
 
@@ -131,9 +131,10 @@
 
   // Will always run |untrack_and_delete_flag| on current MessageLoop.
   base::ScopedClosureRunner* untrack_and_delete_flag_runner =
-      new base::ScopedClosureRunner(Bind(&RunOrPostToTaskRunner,
-                                         base::ThreadTaskRunnerHandle::Get(),
-                                         untrack_and_delete_flag));
+      new base::ScopedClosureRunner(
+          Bind(&RunOrPostToTaskRunner,
+               RetainedRef(base::ThreadTaskRunnerHandle::Get()),
+               untrack_and_delete_flag));
 
   *is_canceled_cb =
       Bind(&IsCanceled, flag, base::Owned(untrack_and_delete_flag_runner));
diff --git a/base/task_runner.h b/base/task_runner.h
index 6dd82cc..9593835 100644
--- a/base/task_runner.h
+++ b/base/task_runner.h
@@ -9,13 +9,10 @@
 
 #include "base/base_export.h"
 #include "base/callback_forward.h"
+#include "base/location.h"
 #include "base/memory/ref_counted.h"
 #include "base/time/time.h"
 
-namespace tracked_objects {
-class Location;
-} // namespace tracked_objects
-
 namespace base {
 
 struct TaskRunnerTraits;
diff --git a/base/task_runner_util.h b/base/task_runner_util.h
index da088db..ba8e120 100644
--- a/base/task_runner_util.h
+++ b/base/task_runner_util.h
@@ -7,7 +7,6 @@
 
 #include "base/bind.h"
 #include "base/bind_helpers.h"
-#include "base/callback_internal.h"
 #include "base/logging.h"
 #include "base/task_runner.h"
 
@@ -32,7 +31,7 @@
   // current code that relies on this API softness has been removed.
   // http://crbug.com/162712
   if (!callback.is_null())
-    callback.Run(CallbackForward(*result));
+    callback.Run(std::move(*result));
 }
 
 }  // namespace internal
diff --git a/base/task_runner_util_unittest.cc b/base/task_runner_util_unittest.cc
index 0a4f22e..1df5436 100644
--- a/base/task_runner_util_unittest.cc
+++ b/base/task_runner_util_unittest.cc
@@ -36,13 +36,13 @@
   }
 };
 
-scoped_ptr<Foo> CreateFoo() {
-  return scoped_ptr<Foo>(new Foo);
+std::unique_ptr<Foo> CreateFoo() {
+  return std::unique_ptr<Foo>(new Foo);
 }
 
-void ExpectFoo(scoped_ptr<Foo> foo) {
+void ExpectFoo(std::unique_ptr<Foo> foo) {
   EXPECT_TRUE(foo.get());
-  scoped_ptr<Foo> local_foo(std::move(foo));
+  std::unique_ptr<Foo> local_foo(std::move(foo));
   EXPECT_TRUE(local_foo.get());
   EXPECT_FALSE(foo.get());
 }
@@ -54,13 +54,13 @@
   };
 };
 
-scoped_ptr<Foo, FooDeleter> CreateScopedFoo() {
-  return scoped_ptr<Foo, FooDeleter>(new Foo);
+std::unique_ptr<Foo, FooDeleter> CreateScopedFoo() {
+  return std::unique_ptr<Foo, FooDeleter>(new Foo);
 }
 
-void ExpectScopedFoo(scoped_ptr<Foo, FooDeleter> foo) {
+void ExpectScopedFoo(std::unique_ptr<Foo, FooDeleter> foo) {
   EXPECT_TRUE(foo.get());
-  scoped_ptr<Foo, FooDeleter> local_foo(std::move(foo));
+  std::unique_ptr<Foo, FooDeleter> local_foo(std::move(foo));
   EXPECT_TRUE(local_foo.get());
   EXPECT_FALSE(foo.get());
 }
diff --git a/base/task_scheduler/OWNERS b/base/task_scheduler/OWNERS
new file mode 100644
index 0000000..e4b383c
--- /dev/null
+++ b/base/task_scheduler/OWNERS
@@ -0,0 +1,3 @@
+fdoray@chromium.org
+gab@chromium.org
+robliao@chromium.org
diff --git a/base/task_scheduler/scheduler_lock.h b/base/task_scheduler/scheduler_lock.h
new file mode 100644
index 0000000..c969eb1
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock.h
@@ -0,0 +1,88 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H
+#define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/task_scheduler/scheduler_lock_impl.h"
+
+namespace base {
+namespace internal {
+
+// SchedulerLock should be used anywhere a lock would be used in the scheduler.
+// When DCHECK_IS_ON(), lock checking occurs. Otherwise, SchedulerLock is
+// equivalent to base::Lock.
+//
+// The shape of SchedulerLock is as follows:
+// SchedulerLock()
+//     Default constructor, no predecessor lock.
+//     DCHECKs
+//         On Acquisition if any scheduler lock is acquired on this thread.
+//
+// SchedulerLock(const SchedulerLock* predecessor)
+//     Constructor that specifies an allowed predecessor for that lock.
+//     DCHECKs
+//         On Construction if |predecessor| forms a predecessor lock cycle.
+//         On Acquisition if the previous lock acquired on the thread is not
+//             |predecessor|. Okay if there was no previous lock acquired.
+//
+// void Acquire()
+//     Acquires the lock.
+//
+// void Release()
+//     Releases the lock.
+//
+// void AssertAcquired().
+//     DCHECKs if the lock is not acquired.
+//
+// std::unique_ptr<ConditionVariable> CreateConditionVariable()
+//     Creates a condition variable using this as a lock.
+
+#if DCHECK_IS_ON()
+class SchedulerLock : public SchedulerLockImpl {
+ public:
+  SchedulerLock() = default;
+  explicit SchedulerLock(const SchedulerLock* predecessor)
+      : SchedulerLockImpl(predecessor) {}
+};
+#else  // DCHECK_IS_ON()
+class SchedulerLock : public Lock {
+ public:
+  SchedulerLock() = default;
+  explicit SchedulerLock(const SchedulerLock*) {}
+
+  std::unique_ptr<ConditionVariable> CreateConditionVariable() {
+    return std::unique_ptr<ConditionVariable>(new ConditionVariable(this));
+  }
+};
+#endif  // DCHECK_IS_ON()
+
+// Provides the same functionality as base::AutoLock for SchedulerLock.
+class AutoSchedulerLock {
+ public:
+  explicit AutoSchedulerLock(SchedulerLock& lock) : lock_(lock) {
+    lock_.Acquire();
+  }
+
+  ~AutoSchedulerLock() {
+    lock_.AssertAcquired();
+    lock_.Release();
+  }
+
+ private:
+  SchedulerLock& lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(AutoSchedulerLock);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H
diff --git a/base/task_scheduler/scheduler_lock_impl.cc b/base/task_scheduler/scheduler_lock_impl.cc
new file mode 100644
index 0000000..7480e18
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_impl.cc
@@ -0,0 +1,145 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_lock_impl.h"
+
+#include <algorithm>
+#include <unordered_map>
+#include <vector>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class SafeAcquisitionTracker {
+ public:
+  SafeAcquisitionTracker() : tls_acquired_locks_(&OnTLSDestroy) {}
+
+  void RegisterLock(
+      const SchedulerLockImpl* const lock,
+      const SchedulerLockImpl* const predecessor) {
+    DCHECK_NE(lock, predecessor) << "Reentrant locks are unsupported.";
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    allowed_predecessor_map_[lock] = predecessor;
+    AssertSafePredecessor(lock);
+  }
+
+  void UnregisterLock(const SchedulerLockImpl* const lock) {
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    allowed_predecessor_map_.erase(lock);
+  }
+
+  void RecordAcquisition(const SchedulerLockImpl* const lock) {
+    AssertSafeAcquire(lock);
+    GetAcquiredLocksOnCurrentThread()->push_back(lock);
+  }
+
+  void RecordRelease(const SchedulerLockImpl* const lock) {
+    LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
+    const auto iter_at_lock =
+        std::find(acquired_locks->begin(), acquired_locks->end(), lock);
+    DCHECK(iter_at_lock != acquired_locks->end());
+    acquired_locks->erase(iter_at_lock);
+  }
+
+ private:
+  using LockVector = std::vector<const SchedulerLockImpl*>;
+  using PredecessorMap = std::unordered_map<
+      const SchedulerLockImpl*, const SchedulerLockImpl*>;
+
+  // This asserts that the lock is safe to acquire. This means that this should
+  // be run before actually recording the acquisition.
+  void AssertSafeAcquire(const SchedulerLockImpl* const lock) {
+    const LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
+
+    // If the thread currently holds no locks, this is inherently safe.
+    if (acquired_locks->empty())
+      return;
+
+    // Otherwise, make sure that the previous lock acquired is an allowed
+    // predecessor.
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    const SchedulerLockImpl* allowed_predecessor =
+        allowed_predecessor_map_.at(lock);
+    DCHECK_EQ(acquired_locks->back(), allowed_predecessor);
+  }
+
+  void AssertSafePredecessor(const SchedulerLockImpl* lock) const {
+    allowed_predecessor_map_lock_.AssertAcquired();
+    for (const SchedulerLockImpl* predecessor =
+             allowed_predecessor_map_.at(lock);
+         predecessor != nullptr;
+         predecessor = allowed_predecessor_map_.at(predecessor)) {
+      DCHECK_NE(predecessor, lock) <<
+          "Scheduler lock predecessor cycle detected.";
+    }
+  }
+
+  LockVector* GetAcquiredLocksOnCurrentThread() {
+    if (!tls_acquired_locks_.Get())
+      tls_acquired_locks_.Set(new LockVector);
+
+    return reinterpret_cast<LockVector*>(tls_acquired_locks_.Get());
+  }
+
+  static void OnTLSDestroy(void* value) {
+    delete reinterpret_cast<LockVector*>(value);
+  }
+
+  // Synchronizes access to |allowed_predecessor_map_|.
+  Lock allowed_predecessor_map_lock_;
+
+  // A map of allowed predecessors.
+  PredecessorMap allowed_predecessor_map_;
+
+  // A thread-local slot holding a vector of locks currently acquired on the
+  // current thread.
+  ThreadLocalStorage::Slot tls_acquired_locks_;
+
+  DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker);
+};
+
+LazyInstance<SafeAcquisitionTracker>::Leaky g_safe_acquisition_tracker =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+SchedulerLockImpl::SchedulerLockImpl() : SchedulerLockImpl(nullptr) {}
+
+SchedulerLockImpl::SchedulerLockImpl(const SchedulerLockImpl* predecessor) {
+  g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);
+}
+
+SchedulerLockImpl::~SchedulerLockImpl() {
+  g_safe_acquisition_tracker.Get().UnregisterLock(this);
+}
+
+void SchedulerLockImpl::Acquire() {
+  lock_.Acquire();
+  g_safe_acquisition_tracker.Get().RecordAcquisition(this);
+}
+
+void SchedulerLockImpl::Release() {
+  lock_.Release();
+  g_safe_acquisition_tracker.Get().RecordRelease(this);
+}
+
+void SchedulerLockImpl::AssertAcquired() const {
+  lock_.AssertAcquired();
+}
+
+std::unique_ptr<ConditionVariable>
+SchedulerLockImpl::CreateConditionVariable() {
+  return std::unique_ptr<ConditionVariable>(new ConditionVariable(&lock_));
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_lock_impl.h b/base/task_scheduler/scheduler_lock_impl.h
new file mode 100644
index 0000000..65699bb
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_impl.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H
+#define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class ConditionVariable;
+
+namespace internal {
+
+// A regular lock with simple deadlock correctness checking.
+// This lock tracks all of the available locks to make sure that any locks are
+// acquired in an expected order.
+// See scheduler_lock.h for details.
+class BASE_EXPORT SchedulerLockImpl {
+ public:
+  SchedulerLockImpl();
+  explicit SchedulerLockImpl(const SchedulerLockImpl* predecessor);
+  ~SchedulerLockImpl();
+
+  void Acquire();
+  void Release();
+
+  void AssertAcquired() const;
+
+  std::unique_ptr<ConditionVariable> CreateConditionVariable();
+
+ private:
+  Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerLockImpl);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H
diff --git a/base/task_scheduler/scheduler_lock_unittest.cc b/base/task_scheduler/scheduler_lock_unittest.cc
new file mode 100644
index 0000000..daa5025
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_unittest.cc
@@ -0,0 +1,296 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_lock.h"
+
+#include <stdlib.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/rand_util.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/test_utils.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+namespace {
+
+// Adapted from base::Lock's BasicLockTestThread to make sure
+// Acquire()/Release() don't crash.
+class BasicLockTestThread : public SimpleThread {
+ public:
+  explicit BasicLockTestThread(SchedulerLock* lock)
+      : SimpleThread("BasicLockTestThread"),
+        lock_(lock),
+        acquired_(0) {}
+
+  int acquired() const { return acquired_; }
+
+ private:
+  void Run() override {
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      lock_->Release();
+    }
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+      lock_->Release();
+    }
+  }
+
+  SchedulerLock* const lock_;
+  int acquired_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockTestThread);
+};
+
+class BasicLockAcquireAndWaitThread : public SimpleThread {
+ public:
+  explicit BasicLockAcquireAndWaitThread(SchedulerLock* lock)
+      : SimpleThread("BasicLockAcquireAndWaitThread"),
+        lock_(lock),
+        lock_acquire_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                            WaitableEvent::InitialState::NOT_SIGNALED),
+        main_thread_continue_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED) {
+  }
+
+  void WaitForLockAcquisition() {
+    lock_acquire_event_.Wait();
+  }
+
+  void ContinueMain() {
+    main_thread_continue_event_.Signal();
+  }
+
+ private:
+  void Run() override {
+    lock_->Acquire();
+    lock_acquire_event_.Signal();
+    main_thread_continue_event_.Wait();
+    lock_->Release();
+  }
+
+  SchedulerLock* const lock_;
+  WaitableEvent lock_acquire_event_;
+  WaitableEvent main_thread_continue_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockAcquireAndWaitThread);
+};
+
+TEST(TaskSchedulerLock, Basic) {
+  SchedulerLock lock;
+  BasicLockTestThread thread(&lock);
+
+  thread.Start();
+
+  int acquired = 0;
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    lock.Release();
+  }
+  for (int i = 0; i < 10; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+    lock.Release();
+  }
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+    lock.Release();
+  }
+
+  thread.Join();
+
+  EXPECT_EQ(acquired, 20);
+  EXPECT_EQ(thread.acquired(), 20);
+}
+
+TEST(TaskSchedulerLock, AcquirePredecessor) {
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  lock.Acquire();
+  lock.Release();
+  predecessor.Release();
+}
+
+TEST(TaskSchedulerLock, AcquirePredecessorWrongOrder) {
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  EXPECT_DCHECK_DEATH({
+    lock.Acquire();
+    predecessor.Acquire();
+  }, "");
+}
+
+TEST(TaskSchedulerLock, AcquireNonPredecessor) {
+  SchedulerLock lock1;
+  SchedulerLock lock2;
+  EXPECT_DCHECK_DEATH({
+    lock1.Acquire();
+    lock2.Acquire();
+  }, "");
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksInOrder) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  lock1.Acquire();
+  lock2.Acquire();
+  lock3.Acquire();
+  lock3.Release();
+  lock2.Release();
+  lock1.Release();
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksInTheMiddleOfAChain) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  lock2.Acquire();
+  lock3.Acquire();
+  lock3.Release();
+  lock2.Release();
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksNoTransitivity) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  EXPECT_DCHECK_DEATH({
+    lock1.Acquire();
+    lock3.Acquire();
+  }, "");
+}
+
+TEST(TaskSchedulerLock, AcquireLocksDifferentThreadsSafely) {
+  SchedulerLock lock1;
+  SchedulerLock lock2;
+  BasicLockAcquireAndWaitThread thread(&lock1);
+  thread.Start();
+
+  lock2.Acquire();
+  thread.WaitForLockAcquisition();
+  thread.ContinueMain();
+  thread.Join();
+  lock2.Release();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyPredecessorFirst) {
+  // A lock and its predecessor may be safely acquired on different threads.
+  // This Thread                Other Thread
+  // predecessor.Acquire()
+  //                            lock.Acquire()
+  // predecessor.Release()
+  //                            lock.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  BasicLockAcquireAndWaitThread thread(&lock);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  predecessor.Release();
+  thread.ContinueMain();
+  thread.Join();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyPredecessorLast) {
+  // A lock and its predecessor may be safely acquired on different threads.
+  // This Thread                Other Thread
+  // lock.Acquire()
+  //                            predecessor.Acquire()
+  // lock.Release()
+  //                            predecessor.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  lock.Acquire();
+  BasicLockAcquireAndWaitThread thread(&predecessor);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  lock.Release();
+  thread.ContinueMain();
+  thread.Join();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyNoInterference) {
+  // Acquisition of an unrelated lock on another thread should not affect a
+  // legal lock acquisition with a predecessor on this thread.
+  // This Thread                Other Thread
+  // predecessor.Acquire()
+  //                            unrelated.Acquire()
+  // lock.Acquire()
+  //                            unrelated.Release()
+  // lock.Release()
+  // predecessor.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  SchedulerLock unrelated;
+  BasicLockAcquireAndWaitThread thread(&unrelated);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  lock.Acquire();
+  thread.ContinueMain();
+  thread.Join();
+  lock.Release();
+  predecessor.Release();
+}
+
+TEST(TaskSchedulerLock, SelfReferentialLock) {
+  struct SelfReferentialLock {
+    SelfReferentialLock() : lock(&lock) {}
+
+    SchedulerLock lock;
+  };
+
+  EXPECT_DCHECK_DEATH({ SelfReferentialLock lock; }, "");
+}
+
+TEST(TaskSchedulerLock, PredecessorCycle) {
+  struct LockCycle {
+    LockCycle() : lock1(&lock2), lock2(&lock1) {}
+
+    SchedulerLock lock1;
+    SchedulerLock lock2;
+  };
+
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
+}
+
+TEST(TaskSchedulerLock, PredecessorLongerCycle) {
+  struct LockCycle {
+    LockCycle()
+        : lock1(&lock5),
+          lock2(&lock1),
+          lock3(&lock2),
+          lock4(&lock3),
+          lock5(&lock4) {}
+
+    SchedulerLock lock1;
+    SchedulerLock lock2;
+    SchedulerLock lock3;
+    SchedulerLock lock4;
+    SchedulerLock lock5;
+  };
+
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
+}
+
+}  // namespace
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
new file mode 100644
index 0000000..4ecb605
--- /dev/null
+++ b/base/task_scheduler/sequence.cc
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+Sequence::Sequence() = default;
+
+bool Sequence::PushTask(std::unique_ptr<Task> task) {
+  DCHECK(task->sequenced_time.is_null());
+  task->sequenced_time = base::TimeTicks::Now();
+
+  AutoSchedulerLock auto_lock(lock_);
+  ++num_tasks_per_priority_[static_cast<int>(task->traits.priority())];
+  queue_.push(std::move(task));
+
+  // Return true if the sequence was empty before the push.
+  return queue_.size() == 1;
+}
+
+const Task* Sequence::PeekTask() const {
+  AutoSchedulerLock auto_lock(lock_);
+
+  if (queue_.empty())
+    return nullptr;
+
+  return queue_.front().get();
+}
+
+bool Sequence::PopTask() {
+  AutoSchedulerLock auto_lock(lock_);
+  DCHECK(!queue_.empty());
+
+  const int priority_index =
+      static_cast<int>(queue_.front()->traits.priority());
+  DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
+  --num_tasks_per_priority_[priority_index];
+
+  queue_.pop();
+  return queue_.empty();
+}
+
+SequenceSortKey Sequence::GetSortKey() const {
+  TaskPriority priority = TaskPriority::LOWEST;
+  base::TimeTicks next_task_sequenced_time;
+
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    DCHECK(!queue_.empty());
+
+    // Find the highest task priority in the sequence.
+    const int highest_priority_index = static_cast<int>(TaskPriority::HIGHEST);
+    const int lowest_priority_index = static_cast<int>(TaskPriority::LOWEST);
+    for (int i = highest_priority_index; i > lowest_priority_index; --i) {
+      if (num_tasks_per_priority_[i] > 0) {
+        priority = static_cast<TaskPriority>(i);
+        break;
+      }
+    }
+
+    // Save the sequenced time of the next task in the sequence.
+    next_task_sequenced_time = queue_.front()->sequenced_time;
+  }
+
+  return SequenceSortKey(priority, next_task_sequenced_time);
+}
+
+Sequence::~Sequence() = default;
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/sequence.h b/base/task_scheduler/sequence.h
new file mode 100644
index 0000000..3fa037f
--- /dev/null
+++ b/base/task_scheduler/sequence.h
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SEQUENCE_H_
+#define BASE_TASK_SCHEDULER_SEQUENCE_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <queue>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+
+namespace base {
+namespace internal {
+
+// A sequence holds tasks that must be executed in posting order.
+//
+// Note: there is a known refcounted-ownership cycle in the Scheduler
+// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
+// This is okay so long as the other owners of Sequence (PriorityQueue and
+// SchedulerWorker in alternation and
+// SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork()
+// temporarily) keep running it (and taking Tasks from it as a result). A
+// dangling reference cycle would only occur should they release their reference
+// to it while it's not empty. In other words, it is only correct for them to
+// release it after PopTask() returns false to indicate it was made empty by
+// that call (in which case the next PushTask() will return true to indicate to
+// the caller that the Sequence should be re-enqueued for execution).
+//
+// This class is thread-safe.
+class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
+ public:
+  Sequence();
+
+  // Adds |task| at the end of the sequence's queue. Returns true if the
+  // sequence was empty before this operation.
+  bool PushTask(std::unique_ptr<Task> task);
+
+  // Returns the task in front of the sequence's queue, if any.
+  const Task* PeekTask() const;
+
+  // Removes the task in front of the sequence's queue. Returns true if the
+  // sequence is empty after this operation. Cannot be called on an empty
+  // sequence.
+  bool PopTask();
+
+  // Returns a SequenceSortKey representing the priority of the sequence. Cannot
+  // be called on an empty sequence.
+  SequenceSortKey GetSortKey() const;
+
+ private:
+  friend class RefCountedThreadSafe<Sequence>;
+  ~Sequence();
+
+  // Synchronizes access to all members.
+  mutable SchedulerLock lock_;
+
+  // Queue of tasks to execute.
+  std::queue<std::unique_ptr<Task>> queue_;
+
+  // Number of tasks contained in the sequence for each priority.
+  size_t num_tasks_per_priority_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
+      {};
+
+  DISALLOW_COPY_AND_ASSIGN(Sequence);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SEQUENCE_H_
diff --git a/base/task_scheduler/sequence_sort_key.cc b/base/task_scheduler/sequence_sort_key.cc
new file mode 100644
index 0000000..e356c8b
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence_sort_key.h"
+
+namespace base {
+namespace internal {
+
+SequenceSortKey::SequenceSortKey(TaskPriority priority,
+                                 TimeTicks next_task_sequenced_time)
+    : priority_(priority),
+      next_task_sequenced_time_(next_task_sequenced_time) {}
+
+bool SequenceSortKey::operator<(const SequenceSortKey& other) const {
+  // This SequenceSortKey is considered less important than |other| if it has a
+  // lower priority or if it has the same priority but its next task was posted
+  // later than |other|'s.
+  const int priority_diff =
+      static_cast<int>(priority_) - static_cast<int>(other.priority_);
+  if (priority_diff < 0)
+    return true;
+  if (priority_diff > 0)
+    return false;
+  return next_task_sequenced_time_ > other.next_task_sequenced_time_;
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/sequence_sort_key.h b/base/task_scheduler/sequence_sort_key.h
new file mode 100644
index 0000000..eb81708
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key.h
@@ -0,0 +1,49 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
+#define BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
+
+#include "base/base_export.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// An immutable but assignable representation of the priority of a Sequence.
+class BASE_EXPORT SequenceSortKey final {
+ public:
+  SequenceSortKey(TaskPriority priority, TimeTicks next_task_sequenced_time);
+
+  TaskPriority priority() const { return priority_; }
+
+  bool operator<(const SequenceSortKey& other) const;
+  bool operator>(const SequenceSortKey& other) const { return other < *this; }
+
+  bool operator==(const SequenceSortKey& other) const {
+    return priority_ == other.priority_ &&
+           next_task_sequenced_time_ == other.next_task_sequenced_time_;
+  }
+  bool operator!=(const SequenceSortKey& other) const {
+    return !(other == *this);
+  };
+
+ private:
+  // The private section allows this class to keep its immutable property while
+  // being copy-assignable (i.e. instead of making its members const).
+
+  // Highest task priority in the sequence at the time this sort key was
+  // created.
+  TaskPriority priority_;
+
+  // Sequenced time of the next task to run in the sequence at the time this
+  // sort key was created.
+  TimeTicks next_task_sequenced_time_;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
diff --git a/base/task_scheduler/sequence_sort_key_unittest.cc b/base/task_scheduler/sequence_sort_key_unittest.cc
new file mode 100644
index 0000000..2c1d80d
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key_unittest.cc
@@ -0,0 +1,243 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence_sort_key.h"
+
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorLessThan) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a < key_a);
+  EXPECT_LT(key_b, key_a);
+  EXPECT_LT(key_c, key_a);
+  EXPECT_LT(key_d, key_a);
+  EXPECT_LT(key_e, key_a);
+  EXPECT_LT(key_f, key_a);
+
+  EXPECT_FALSE(key_a < key_b);
+  EXPECT_FALSE(key_b < key_b);
+  EXPECT_LT(key_c, key_b);
+  EXPECT_LT(key_d, key_b);
+  EXPECT_LT(key_e, key_b);
+  EXPECT_LT(key_f, key_b);
+
+  EXPECT_FALSE(key_a < key_c);
+  EXPECT_FALSE(key_b < key_c);
+  EXPECT_FALSE(key_c < key_c);
+  EXPECT_LT(key_d, key_c);
+  EXPECT_LT(key_e, key_c);
+  EXPECT_LT(key_f, key_c);
+
+  EXPECT_FALSE(key_a < key_d);
+  EXPECT_FALSE(key_b < key_d);
+  EXPECT_FALSE(key_c < key_d);
+  EXPECT_FALSE(key_d < key_d);
+  EXPECT_LT(key_e, key_d);
+  EXPECT_LT(key_f, key_d);
+
+  EXPECT_FALSE(key_a < key_e);
+  EXPECT_FALSE(key_b < key_e);
+  EXPECT_FALSE(key_c < key_e);
+  EXPECT_FALSE(key_d < key_e);
+  EXPECT_FALSE(key_e < key_e);
+  EXPECT_LT(key_f, key_e);
+
+  EXPECT_FALSE(key_a < key_f);
+  EXPECT_FALSE(key_b < key_f);
+  EXPECT_FALSE(key_c < key_f);
+  EXPECT_FALSE(key_d < key_f);
+  EXPECT_FALSE(key_e < key_f);
+  EXPECT_FALSE(key_f < key_f);
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorGreaterThan) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a > key_a);
+  EXPECT_FALSE(key_b > key_a);
+  EXPECT_FALSE(key_c > key_a);
+  EXPECT_FALSE(key_d > key_a);
+  EXPECT_FALSE(key_e > key_a);
+  EXPECT_FALSE(key_f > key_a);
+
+  EXPECT_GT(key_a, key_b);
+  EXPECT_FALSE(key_b > key_b);
+  EXPECT_FALSE(key_c > key_b);
+  EXPECT_FALSE(key_d > key_b);
+  EXPECT_FALSE(key_e > key_b);
+  EXPECT_FALSE(key_f > key_b);
+
+  EXPECT_GT(key_a, key_c);
+  EXPECT_GT(key_b, key_c);
+  EXPECT_FALSE(key_c > key_c);
+  EXPECT_FALSE(key_d > key_c);
+  EXPECT_FALSE(key_e > key_c);
+  EXPECT_FALSE(key_f > key_c);
+
+  EXPECT_GT(key_a, key_d);
+  EXPECT_GT(key_b, key_d);
+  EXPECT_GT(key_c, key_d);
+  EXPECT_FALSE(key_d > key_d);
+  EXPECT_FALSE(key_e > key_d);
+  EXPECT_FALSE(key_f > key_d);
+
+  EXPECT_GT(key_a, key_e);
+  EXPECT_GT(key_b, key_e);
+  EXPECT_GT(key_c, key_e);
+  EXPECT_GT(key_d, key_e);
+  EXPECT_FALSE(key_e > key_e);
+  EXPECT_FALSE(key_f > key_e);
+
+  EXPECT_GT(key_a, key_f);
+  EXPECT_GT(key_b, key_f);
+  EXPECT_GT(key_c, key_f);
+  EXPECT_GT(key_d, key_f);
+  EXPECT_GT(key_e, key_f);
+  EXPECT_FALSE(key_f > key_f);
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorEqual) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_EQ(key_a, key_a);
+  EXPECT_FALSE(key_b == key_a);
+  EXPECT_FALSE(key_c == key_a);
+  EXPECT_FALSE(key_d == key_a);
+  EXPECT_FALSE(key_e == key_a);
+  EXPECT_FALSE(key_f == key_a);
+
+  EXPECT_FALSE(key_a == key_b);
+  EXPECT_EQ(key_b, key_b);
+  EXPECT_FALSE(key_c == key_b);
+  EXPECT_FALSE(key_d == key_b);
+  EXPECT_FALSE(key_e == key_b);
+  EXPECT_FALSE(key_f == key_b);
+
+  EXPECT_FALSE(key_a == key_c);
+  EXPECT_FALSE(key_b == key_c);
+  EXPECT_EQ(key_c, key_c);
+  EXPECT_FALSE(key_d == key_c);
+  EXPECT_FALSE(key_e == key_c);
+  EXPECT_FALSE(key_f == key_c);
+
+  EXPECT_FALSE(key_a == key_d);
+  EXPECT_FALSE(key_b == key_d);
+  EXPECT_FALSE(key_c == key_d);
+  EXPECT_EQ(key_d, key_d);
+  EXPECT_FALSE(key_e == key_d);
+  EXPECT_FALSE(key_f == key_d);
+
+  EXPECT_FALSE(key_a == key_e);
+  EXPECT_FALSE(key_b == key_e);
+  EXPECT_FALSE(key_c == key_e);
+  EXPECT_FALSE(key_d == key_e);
+  EXPECT_EQ(key_e, key_e);
+  EXPECT_FALSE(key_f == key_e);
+
+  EXPECT_FALSE(key_a == key_f);
+  EXPECT_FALSE(key_b == key_f);
+  EXPECT_FALSE(key_c == key_f);
+  EXPECT_FALSE(key_d == key_f);
+  EXPECT_FALSE(key_e == key_f);
+  EXPECT_EQ(key_f, key_f);
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorNotEqual) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a != key_a);
+  EXPECT_NE(key_b, key_a);
+  EXPECT_NE(key_c, key_a);
+  EXPECT_NE(key_d, key_a);
+  EXPECT_NE(key_e, key_a);
+  EXPECT_NE(key_f, key_a);
+
+  EXPECT_NE(key_a, key_b);
+  EXPECT_FALSE(key_b != key_b);
+  EXPECT_NE(key_c, key_b);
+  EXPECT_NE(key_d, key_b);
+  EXPECT_NE(key_e, key_b);
+  EXPECT_NE(key_f, key_b);
+
+  EXPECT_NE(key_a, key_c);
+  EXPECT_NE(key_b, key_c);
+  EXPECT_FALSE(key_c != key_c);
+  EXPECT_NE(key_d, key_c);
+  EXPECT_NE(key_e, key_c);
+  EXPECT_NE(key_f, key_c);
+
+  EXPECT_NE(key_a, key_d);
+  EXPECT_NE(key_b, key_d);
+  EXPECT_NE(key_c, key_d);
+  EXPECT_FALSE(key_d != key_d);
+  EXPECT_NE(key_e, key_d);
+  EXPECT_NE(key_f, key_d);
+
+  EXPECT_NE(key_a, key_e);
+  EXPECT_NE(key_b, key_e);
+  EXPECT_NE(key_c, key_e);
+  EXPECT_NE(key_d, key_e);
+  EXPECT_FALSE(key_e != key_e);
+  EXPECT_NE(key_f, key_e);
+
+  EXPECT_NE(key_a, key_f);
+  EXPECT_NE(key_b, key_f);
+  EXPECT_NE(key_c, key_f);
+  EXPECT_NE(key_d, key_f);
+  EXPECT_NE(key_e, key_f);
+  EXPECT_FALSE(key_f != key_f);
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
new file mode 100644
index 0000000..6a15299
--- /dev/null
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -0,0 +1,189 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence.h"
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class TaskSchedulerSequenceTest : public testing::Test {
+ public:
+  TaskSchedulerSequenceTest()
+      : task_a_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::BACKGROUND),
+                     TimeDelta())),
+        task_b_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::USER_VISIBLE),
+                     TimeDelta())),
+        task_c_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
+                     TimeDelta())),
+        task_d_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
+                     TimeDelta())),
+        task_e_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::BACKGROUND),
+                     TimeDelta())),
+        task_a_(task_a_owned_.get()),
+        task_b_(task_b_owned_.get()),
+        task_c_(task_c_owned_.get()),
+        task_d_(task_d_owned_.get()),
+        task_e_(task_e_owned_.get()) {}
+
+ protected:
+  // Tasks to be handed off to a Sequence for testing.
+  std::unique_ptr<Task> task_a_owned_;
+  std::unique_ptr<Task> task_b_owned_;
+  std::unique_ptr<Task> task_c_owned_;
+  std::unique_ptr<Task> task_d_owned_;
+  std::unique_ptr<Task> task_e_owned_;
+
+  // Raw pointers to those same tasks for verification. This is needed because
+  // the unique_ptrs above no longer point to the tasks once they have been
+  // moved into a Sequence.
+  const Task* task_a_;
+  const Task* task_b_;
+  const Task* task_c_;
+  const Task* task_d_;
+  const Task* task_e_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSequenceTest);
+};
+
+}  // namespace
+
+TEST_F(TaskSchedulerSequenceTest, PushPopPeek) {
+  scoped_refptr<Sequence> sequence(new Sequence);
+
+  // Push task A in the sequence. Its sequenced time should be updated and it
+  // should be in front of the sequence.
+  EXPECT_TRUE(sequence->PushTask(std::move(task_a_owned_)));
+  EXPECT_FALSE(task_a_->sequenced_time.is_null());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
+
+  // Push task B, C and D in the sequence. Their sequenced time should be
+  // updated and task A should always remain in front of the sequence.
+  EXPECT_FALSE(sequence->PushTask(std::move(task_b_owned_)));
+  EXPECT_FALSE(task_b_->sequenced_time.is_null());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
+
+  EXPECT_FALSE(sequence->PushTask(std::move(task_c_owned_)));
+  EXPECT_FALSE(task_c_->sequenced_time.is_null());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
+
+  EXPECT_FALSE(sequence->PushTask(std::move(task_d_owned_)));
+  EXPECT_FALSE(task_d_->sequenced_time.is_null());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
+
+  // Pop task A. Task B should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_b_, sequence->PeekTask());
+
+  // Pop task B. Task C should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_c_, sequence->PeekTask());
+
+  // Pop task C. Task D should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_d_, sequence->PeekTask());
+
+  // Push task E in the sequence. Its sequenced time should be updated and
+  // task D should remain in front.
+  EXPECT_FALSE(sequence->PushTask(std::move(task_e_owned_)));
+  EXPECT_FALSE(task_e_->sequenced_time.is_null());
+  EXPECT_EQ(task_d_, sequence->PeekTask());
+
+  // Pop task D. Task E should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_e_, sequence->PeekTask());
+
+  // Pop task E. The sequence should now be empty.
+  EXPECT_TRUE(sequence->PopTask());
+  EXPECT_EQ(nullptr, sequence->PeekTask());
+}
+
+TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
+  scoped_refptr<Sequence> sequence(new Sequence);
+
+  // Push task A in the sequence. The highest priority is from task A
+  // (BACKGROUND). Task A is in front of the sequence.
+  sequence->PushTask(std::move(task_a_owned_));
+  EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_a_->sequenced_time),
+            sequence->GetSortKey());
+
+  // Push task B in the sequence. The highest priority is from task B
+  // (USER_VISIBLE). Task A is still in front of the sequence.
+  sequence->PushTask(std::move(task_b_owned_));
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_VISIBLE, task_a_->sequenced_time),
+      sequence->GetSortKey());
+
+  // Push task C in the sequence. The highest priority is from task C
+  // (USER_BLOCKING). Task A is still in front of the sequence.
+  sequence->PushTask(std::move(task_c_owned_));
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time),
+      sequence->GetSortKey());
+
+  // Push task D in the sequence. The highest priority is from tasks C/D
+  // (USER_BLOCKING). Task A is still in front of the sequence.
+  sequence->PushTask(std::move(task_d_owned_));
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time),
+      sequence->GetSortKey());
+
+  // Pop task A. The highest priority is still USER_BLOCKING. The task in front
+  // of the sequence is now task B.
+  sequence->PopTask();
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time),
+      sequence->GetSortKey());
+
+  // Pop task B. The highest priority is still USER_BLOCKING. The task in front
+  // of the sequence is now task C.
+  sequence->PopTask();
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time),
+      sequence->GetSortKey());
+
+  // Pop task C. The highest priority is still USER_BLOCKING. The task in front
+  // of the sequence is now task D.
+  sequence->PopTask();
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
+      sequence->GetSortKey());
+
+  // Push task E in the sequence. The highest priority is still USER_BLOCKING.
+  // The task in front of the sequence is still task D.
+  sequence->PushTask(std::move(task_e_owned_));
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
+      sequence->GetSortKey());
+
+  // Pop task D. The highest priority is now from task E (BACKGROUND). The
+  // task in front of the sequence is now task E.
+  sequence->PopTask();
+  EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time),
+            sequence->GetSortKey());
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
new file mode 100644
index 0000000..8a589a2
--- /dev/null
+++ b/base/task_scheduler/task.cc
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task.h"
+
+namespace base {
+namespace internal {
+
+Task::Task(const tracked_objects::Location& posted_from,
+           const Closure& task,
+           const TaskTraits& traits,
+           const TimeDelta& delay)
+    : PendingTask(posted_from,
+                  task,
+                  delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
+                  false),  // Not nestable.
+      traits(traits) {}
+
+Task::~Task() = default;
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
new file mode 100644
index 0000000..2b53c69
--- /dev/null
+++ b/base/task_scheduler/task.h
@@ -0,0 +1,64 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_H_
+#define BASE_TASK_SCHEDULER_TASK_H_
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// A task is a unit of work inside the task scheduler. Support for tracing and
+// profiling inherited from PendingTask.
+struct BASE_EXPORT Task : public PendingTask {
+  // |posted_from| is the site the task was posted from. |task| is the closure
+  // to run. |traits| is metadata about the task. |delay| is a delay that must
+  // expire before the Task runs.
+  Task(const tracked_objects::Location& posted_from,
+       const Closure& task,
+       const TaskTraits& traits,
+       const TimeDelta& delay);
+  ~Task();
+
+  // The TaskTraits of this task.
+  const TaskTraits traits;
+
+  // The time at which the task was inserted in its sequence. For an undelayed
+  // task, this happens at post time. For a delayed task, this happens some
+  // time after the task's delay has expired. If the task hasn't been inserted
+  // in a sequence yet, this defaults to a null TimeTicks.
+  TimeTicks sequenced_time;
+
+  // A reference to the SequencedTaskRunner or SingleThreadTaskRunner that
+  // posted this task, if any. Used to set ThreadTaskRunnerHandle and/or
+  // SequencedTaskRunnerHandle while the task is running.
+  // Note: this creates an ownership cycle
+  //   Sequence -> Task -> TaskRunner -> Sequence -> ...
+  // but that's okay as it's broken when the Task is popped from its Sequence
+  // after being executed which means this cycle forces the TaskRunner to stick
+  // around until all its tasks have been executed which is a requirement to
+  // support TaskRunnerHandles.
+  scoped_refptr<SequencedTaskRunner> sequenced_task_runner_ref;
+  scoped_refptr<SingleThreadTaskRunner> single_thread_task_runner_ref;
+
+ private:
+  // Disallow copies to make sure no unnecessary ref-bumps are incurred. Making
+  // it move-only would be an option, but isn't necessary for now.
+  DISALLOW_COPY_AND_ASSIGN(Task);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_H_
diff --git a/base/task_scheduler/task_traits.cc b/base/task_scheduler/task_traits.cc
new file mode 100644
index 0000000..dd55535
--- /dev/null
+++ b/base/task_scheduler/task_traits.cc
@@ -0,0 +1,70 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_traits.h"
+
+#include <stddef.h>
+
+#include <ostream>
+
+namespace base {
+
+// Do not rely on defaults hard-coded below beyond the guarantees described in
+// the header; anything else is subject to change. Tasks should explicitly
+// request defaults if the behavior is critical to the task.
+TaskTraits::TaskTraits()
+    : with_file_io_(false),
+      priority_(TaskPriority::BACKGROUND),
+      shutdown_behavior_(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {}
+
+TaskTraits::~TaskTraits() = default;
+
+TaskTraits& TaskTraits::WithFileIO() {
+  with_file_io_ = true;
+  return *this;
+}
+
+TaskTraits& TaskTraits::WithPriority(TaskPriority priority) {
+  priority_ = priority;
+  return *this;
+}
+
+TaskTraits& TaskTraits::WithShutdownBehavior(
+    TaskShutdownBehavior shutdown_behavior) {
+  shutdown_behavior_ = shutdown_behavior;
+  return *this;
+}
+
+std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
+  switch (task_priority) {
+    case TaskPriority::BACKGROUND:
+      os << "BACKGROUND";
+      break;
+    case TaskPriority::USER_VISIBLE:
+      os << "USER_VISIBLE";
+      break;
+    case TaskPriority::USER_BLOCKING:
+      os << "USER_BLOCKING";
+      break;
+  }
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const TaskShutdownBehavior& shutdown_behavior) {
+  switch (shutdown_behavior) {
+    case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
+      os << "CONTINUE_ON_SHUTDOWN";
+      break;
+    case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
+      os << "SKIP_ON_SHUTDOWN";
+      break;
+    case TaskShutdownBehavior::BLOCK_SHUTDOWN:
+      os << "BLOCK_SHUTDOWN";
+      break;
+  }
+  return os;
+}
+
+}  // namespace base
diff --git a/base/task_scheduler/task_traits.h b/base/task_scheduler/task_traits.h
new file mode 100644
index 0000000..0c0d304
--- /dev/null
+++ b/base/task_scheduler/task_traits.h
@@ -0,0 +1,140 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRAITS_H_
+#define BASE_TASK_SCHEDULER_TASK_TRAITS_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Valid priorities supported by the task scheduler. Note: internal algorithms
+// depend on priorities being expressed as a continuous zero-based list from
+// lowest to highest priority. Users of this API shouldn't otherwise care about
+// nor use the underlying values.
+enum class TaskPriority {
+  // This will always be equal to the lowest priority available.
+  LOWEST = 0,
+  // User won't notice if this task takes an arbitrarily long time to complete.
+  BACKGROUND = LOWEST,
+  // This task affects UI or responsiveness of future user interactions. It is
+  // not an immediate response to a user interaction.
+  // Examples:
+  // - Updating the UI to reflect progress on a long task.
+  // - Loading data that might be shown in the UI after a future user
+  //   interaction.
+  USER_VISIBLE,
+  // This task affects UI immediately after a user interaction.
+  // Example: Generating data shown in the UI immediately after a click.
+  USER_BLOCKING,
+  // This will always be equal to the highest priority available.
+  HIGHEST = USER_BLOCKING,
+};
+
+// Valid shutdown behaviors supported by the task scheduler.
+enum class TaskShutdownBehavior {
+  // Tasks posted with this mode which have not started executing before
+  // shutdown is initiated will never run. Tasks with this mode running at
+  // shutdown will be ignored (the worker will not be joined).
+  //
+  // This option provides a nice way to post stuff you don't want blocking
+  // shutdown. For example, you might be doing a slow DNS lookup and if it's
+  // blocked on the OS, you may not want to stop shutdown, since the result
+  // doesn't really matter at that point.
+  //
+  // However, you need to be very careful what you do in your callback when you
+  // use this option. Since the thread will continue to run until the OS
+  // terminates the process, the app can be in the process of tearing down when
+  // you're running. This means any singletons or global objects you use may
+  // suddenly become invalid out from under you. For this reason, it's best to
+  // use this only for slow but simple operations like the DNS example.
+  CONTINUE_ON_SHUTDOWN,
+
+  // Tasks posted with this mode that have not started executing at
+  // shutdown will never run. However, any task that has already begun
+  // executing when shutdown is invoked will be allowed to continue and
+  // will block shutdown until completion.
+  //
+  // Note: Because TaskScheduler::Shutdown() may block while these tasks are
+  // executing, care must be taken to ensure that they do not block on the
+  // thread that called TaskScheduler::Shutdown(), as this may lead to deadlock.
+  SKIP_ON_SHUTDOWN,
+
+  // Tasks posted with this mode before shutdown is complete will block shutdown
+  // until they're executed. Generally, this should be used only to save
+  // critical user data.
+  //
+  // Note: Tasks with BACKGROUND priority that block shutdown will be promoted
+  // to USER_VISIBLE priority during shutdown.
+  BLOCK_SHUTDOWN,
+};
+
+// Describes metadata for a single task or a group of tasks.
+class BASE_EXPORT TaskTraits {
+ public:
+  // Constructs a default TaskTraits for tasks with
+  //     (1) no I/O,
+  //     (2) low priority, and
+  //     (3) may block shutdown or be skipped on shutdown.
+  // Tasks that require stricter guarantees should highlight those by requesting
+  // explicit traits below.
+  TaskTraits();
+  TaskTraits(const TaskTraits& other) = default;
+  TaskTraits& operator=(const TaskTraits& other) = default;
+  ~TaskTraits();
+
+  // Allows tasks with these traits to do file I/O.
+  TaskTraits& WithFileIO();
+
+  // Applies |priority| to tasks with these traits.
+  TaskTraits& WithPriority(TaskPriority priority);
+
+  // Applies |shutdown_behavior| to tasks with these traits.
+  TaskTraits& WithShutdownBehavior(TaskShutdownBehavior shutdown_behavior);
+
+  // Returns true if file I/O is allowed by these traits.
+  bool with_file_io() const { return with_file_io_; }
+
+  // Returns the priority of tasks with these traits.
+  TaskPriority priority() const { return priority_; }
+
+  // Returns the shutdown behavior of tasks with these traits.
+  TaskShutdownBehavior shutdown_behavior() const { return shutdown_behavior_; }
+
+ private:
+  bool with_file_io_;
+  TaskPriority priority_;
+  TaskShutdownBehavior shutdown_behavior_;
+};
+
+// Describes how tasks are executed by a task runner.
+enum class ExecutionMode {
+  // Can execute multiple tasks at a time in any order.
+  PARALLEL,
+
+  // Executes one task at a time in posting order. The sequence's priority is
+  // equivalent to the highest priority pending task in the sequence.
+  SEQUENCED,
+
+  // Executes one task at a time on a single thread in posting order.
+  SINGLE_THREADED,
+};
+
+// Stream operators so TaskPriority and TaskShutdownBehavior can be used in
+// DCHECK statements.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os,
+                                     const TaskPriority& task_priority);
+
+BASE_EXPORT std::ostream& operator<<(
+    std::ostream& os,
+    const TaskShutdownBehavior& shutdown_behavior);
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_TRAITS_H_
diff --git a/base/task_scheduler/test_utils.h b/base/task_scheduler/test_utils.h
new file mode 100644
index 0000000..bafd09a
--- /dev/null
+++ b/base/task_scheduler/test_utils.h
@@ -0,0 +1,19 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TEST_UTILS_H_
+#define BASE_TASK_SCHEDULER_TEST_UTILS_H_
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests misbehave on Android.
+#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define EXPECT_DCHECK_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
+#else
+#define EXPECT_DCHECK_DEATH(statement, regex)
+#endif
+
+#endif  // BASE_TASK_SCHEDULER_TEST_UTILS_H_
diff --git a/base/template_util.h b/base/template_util.h
index d58807a..1bfc1ac 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -6,117 +6,128 @@
 #define BASE_TEMPLATE_UTIL_H_
 
 #include <stddef.h>
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
 
 #include "build/build_config.h"
 
+// This hacks around libstdc++ 4.6 missing stuff in type_traits, while we need
+// to support it.
+#define CR_GLIBCXX_4_7_0 20120322
+#define CR_GLIBCXX_4_5_4 20120702
+#define CR_GLIBCXX_4_6_4 20121127
+#if defined(__GLIBCXX__) &&                                               \
+    (__GLIBCXX__ < CR_GLIBCXX_4_7_0 || __GLIBCXX__ == CR_GLIBCXX_4_5_4 || \
+     __GLIBCXX__ == CR_GLIBCXX_4_6_4)
+#define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#endif
+
 namespace base {
 
-// template definitions from tr1
+template <class T> struct is_non_const_reference : std::false_type {};
+template <class T> struct is_non_const_reference<T&> : std::true_type {};
+template <class T> struct is_non_const_reference<const T&> : std::false_type {};
 
-template<class T, T v>
-struct integral_constant {
-  static const T value = v;
-  typedef T value_type;
-  typedef integral_constant<T, v> type;
-};
-
-template <class T, T v> const T integral_constant<T, v>::value;
-
-typedef integral_constant<bool, true> true_type;
-typedef integral_constant<bool, false> false_type;
-
-template <class T> struct is_pointer : false_type {};
-template <class T> struct is_pointer<T*> : true_type {};
-
-// Member function pointer detection. This is built-in to C++ 11's stdlib, and
-// we can remove this when we switch to it.
-template<typename T>
-struct is_member_function_pointer : false_type {};
-
-template <typename R, typename Z, typename... A>
-struct is_member_function_pointer<R(Z::*)(A...)> : true_type {};
-template <typename R, typename Z, typename... A>
-struct is_member_function_pointer<R(Z::*)(A...) const> : true_type {};
-
-
-template <class T, class U> struct is_same : public false_type {};
-template <class T> struct is_same<T,T> : true_type {};
-
-template<class> struct is_array : public false_type {};
-template<class T, size_t n> struct is_array<T[n]> : public true_type {};
-template<class T> struct is_array<T[]> : public true_type {};
-
-template <class T> struct is_non_const_reference : false_type {};
-template <class T> struct is_non_const_reference<T&> : true_type {};
-template <class T> struct is_non_const_reference<const T&> : false_type {};
-
-template <class T> struct is_const : false_type {};
-template <class T> struct is_const<const T> : true_type {};
-
-template <class T> struct is_void : false_type {};
-template <> struct is_void<void> : true_type {};
+// is_assignable
 
 namespace internal {
 
-// Types YesType and NoType are guaranteed such that sizeof(YesType) <
-// sizeof(NoType).
-typedef char YesType;
-
-struct NoType {
-  YesType dummy[2];
+template <typename First, typename Second>
+struct SelectSecond {
+  using type = Second;
 };
 
-// This class is an implementation detail for is_convertible, and you
-// don't need to know how it works to use is_convertible. For those
-// who care: we declare two different functions, one whose argument is
-// of type To and one with a variadic argument list. We give them
-// return types of different size, so we can use sizeof to trick the
-// compiler into telling us which function it would have chosen if we
-// had called it with an argument of type From.  See Alexandrescu's
-// _Modern C++ Design_ for more details on this sort of trick.
-
-struct ConvertHelper {
-  template <typename To>
-  static YesType Test(To);
-
-  template <typename To>
-  static NoType Test(...);
-
-  template <typename From>
-  static From& Create();
+struct Any {
+  Any(...);
 };
 
-// Used to determine if a type is a struct/union/class. Inspired by Boost's
-// is_class type_trait implementation.
-struct IsClassHelper {
-  template <typename C>
-  static YesType Test(void(C::*)(void));
+// True case: If |Lvalue| can be assigned to from |Rvalue|, then the return
+// value is a true_type.
+template <class Lvalue, class Rvalue>
+typename internal::SelectSecond<
+    decltype((std::declval<Lvalue>() = std::declval<Rvalue>())),
+    std::true_type>::type
+IsAssignableTest(Lvalue&&, Rvalue&&);
 
-  template <typename C>
-  static NoType Test(...);
-};
+// False case: Otherwise the return value is a false_type.
+template <class Rvalue>
+std::false_type IsAssignableTest(internal::Any, Rvalue&&);
+
+// Default case: Neither Lvalue nor Rvalue is void. Uses IsAssignableTest to
+// determine the type of IsAssignableImpl.
+template <class Lvalue,
+          class Rvalue,
+          bool = std::is_void<Lvalue>::value || std::is_void<Rvalue>::value>
+struct IsAssignableImpl
+    : public std::common_type<decltype(
+          internal::IsAssignableTest(std::declval<Lvalue>(),
+                                     std::declval<Rvalue>()))>::type {};
+
+// Void case: Either Lvalue or Rvalue is void. Then the type of IsAssignableTest
+// is false_type.
+template <class Lvalue, class Rvalue>
+struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
+
+// Uses expression SFINAE to detect whether using operator<< would work.
+template <typename T, typename = void>
+struct SupportsOstreamOperator : std::false_type {};
+template <typename T>
+struct SupportsOstreamOperator<T,
+                               decltype(void(std::declval<std::ostream&>()
+                                             << std::declval<T>()))>
+    : std::true_type {};
 
 }  // namespace internal
 
-// Inherits from true_type if From is convertible to To, false_type otherwise.
-//
-// Note that if the type is convertible, this will be a true_type REGARDLESS
-// of whether or not the conversion would emit a warning.
-template <typename From, typename To>
-struct is_convertible
-    : integral_constant<bool,
-                        sizeof(internal::ConvertHelper::Test<To>(
-                                   internal::ConvertHelper::Create<From>())) ==
-                        sizeof(internal::YesType)> {
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class Lvalue, class Rvalue>
+struct is_assignable : public internal::IsAssignableImpl<Lvalue, Rvalue> {};
+
+// is_copy_assignable is true if a T const& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_copy_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           typename std::add_lvalue_reference<
+                               typename std::add_const<T>::type>::type> {};
+
+// is_move_assignable is true if a T&& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_move_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           const typename std::add_rvalue_reference<T>::type> {
 };
 
+// underlying_type produces the integer type backing an enum type.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
 template <typename T>
-struct is_class
-    : integral_constant<bool,
-                        sizeof(internal::IsClassHelper::Test<T>(0)) ==
-                            sizeof(internal::YesType)> {
+struct underlying_type {
+  using type = __underlying_type(T);
 };
+#else
+template <typename T>
+using underlying_type = std::underlying_type<T>;
+#endif
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+template <class T>
+using is_trivially_destructible = std::has_trivial_destructor<T>;
+#else
+template <class T>
+using is_trivially_destructible = std::is_trivially_destructible<T>;
+#endif
 
 }  // namespace base
 
+#undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+
 #endif  // BASE_TEMPLATE_UTIL_H_
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
index b960ab1..9215964 100644
--- a/base/template_util_unittest.cc
+++ b/base/template_util_unittest.cc
@@ -4,31 +4,25 @@
 
 #include "base/template_util.h"
 
+#include <string>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 namespace {
 
-struct AStruct {};
-class AClass {};
-enum AnEnum {};
-
-class Parent {};
-class Child : public Parent {};
-
-// is_pointer<Type>
-static_assert(!is_pointer<int>::value, "IsPointer");
-static_assert(!is_pointer<int&>::value, "IsPointer");
-static_assert(is_pointer<int*>::value, "IsPointer");
-static_assert(is_pointer<const int*>::value, "IsPointer");
-
-// is_array<Type>
-static_assert(!is_array<int>::value, "IsArray");
-static_assert(!is_array<int*>::value, "IsArray");
-static_assert(!is_array<int (*)[3]>::value, "IsArray");
-static_assert(is_array<int[]>::value, "IsArray");
-static_assert(is_array<const int[]>::value, "IsArray");
-static_assert(is_array<int[3]>::value, "IsArray");
+enum SimpleEnum { SIMPLE_ENUM };
+enum EnumWithExplicitType : uint64_t { ENUM_WITH_EXPLICIT_TYPE };
+enum class ScopedEnum { SCOPED_ENUM };
+enum class ScopedEnumWithOperator { SCOPED_ENUM_WITH_OPERATOR };
+std::ostream& operator<<(std::ostream& os, ScopedEnumWithOperator v) {
+  return os;
+}
+struct SimpleStruct {};
+struct StructWithOperator {};
+std::ostream& operator<<(std::ostream& os, const StructWithOperator& v) {
+  return os;
+}
 
 // is_non_const_reference<Type>
 static_assert(!is_non_const_reference<int>::value, "IsNonConstReference");
@@ -36,72 +30,100 @@
               "IsNonConstReference");
 static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
 
-// is_convertible<From, To>
+class AssignParent {};
+class AssignChild : AssignParent {};
 
-// Extra parens needed to make preprocessor macro parsing happy. Otherwise,
-// it sees the equivalent of:
-//
-//     (is_convertible < Child), (Parent > ::value)
-//
-// Silly C++.
-static_assert((is_convertible<Child, Parent>::value), "IsConvertible");
-static_assert(!(is_convertible<Parent, Child>::value), "IsConvertible");
-static_assert(!(is_convertible<Parent, AStruct>::value), "IsConvertible");
-static_assert((is_convertible<int, double>::value), "IsConvertible");
-static_assert((is_convertible<int*, void*>::value), "IsConvertible");
-static_assert(!(is_convertible<void*, int*>::value), "IsConvertible");
+// is_assignable<Type1, Type2>
+static_assert(!is_assignable<int, int>::value, "IsAssignable");  // 1 = 1;
+static_assert(!is_assignable<int, double>::value, "IsAssignable");
+static_assert(is_assignable<int&, int>::value, "IsAssignable");
+static_assert(is_assignable<int&, double>::value, "IsAssignable");
+static_assert(is_assignable<int&, int&>::value, "IsAssignable");
+static_assert(is_assignable<int&, int const&>::value, "IsAssignable");
+static_assert(!is_assignable<int const&, int>::value, "IsAssignable");
+static_assert(!is_assignable<AssignParent&, AssignChild>::value,
+              "IsAssignable");
+static_assert(!is_assignable<AssignChild&, AssignParent>::value,
+              "IsAssignable");
 
-// Array types are an easy corner case.  Make sure to test that
-// it does indeed compile.
-static_assert(!(is_convertible<int[10], double>::value), "IsConvertible");
-static_assert(!(is_convertible<double, int[10]>::value), "IsConvertible");
-static_assert((is_convertible<int[10], int*>::value), "IsConvertible");
+struct AssignCopy {};
+struct AssignNoCopy {
+  AssignNoCopy& operator=(AssignNoCopy&&) { return *this; }
+  AssignNoCopy& operator=(const AssignNoCopy&) = delete;
+};
+struct AssignNoMove {
+  AssignNoMove& operator=(AssignNoMove&&) = delete;
+  AssignNoMove& operator=(const AssignNoMove&) = delete;
+};
 
-// is_same<Type1, Type2>
-static_assert(!(is_same<Child, Parent>::value), "IsSame");
-static_assert(!(is_same<Parent, Child>::value), "IsSame");
-static_assert((is_same<Parent, Parent>::value), "IsSame");
-static_assert((is_same<int*, int*>::value), "IsSame");
-static_assert((is_same<int, int>::value), "IsSame");
-static_assert((is_same<void, void>::value), "IsSame");
-static_assert(!(is_same<int, double>::value), "IsSame");
+static_assert(is_copy_assignable<AssignCopy>::value, "IsCopyAssignable");
+static_assert(!is_copy_assignable<AssignNoCopy>::value, "IsCopyAssignable");
 
-// is_class<Type>
-static_assert(is_class<AStruct>::value, "IsClass");
-static_assert(is_class<AClass>::value, "IsClass");
-static_assert(!is_class<AnEnum>::value, "IsClass");
-static_assert(!is_class<int>::value, "IsClass");
-static_assert(!is_class<char*>::value, "IsClass");
-static_assert(!is_class<int&>::value, "IsClass");
-static_assert(!is_class<char[3]>::value, "IsClass");
+static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
+static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
+static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
 
-static_assert(!is_member_function_pointer<int>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<int*>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<void*>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<AStruct>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<AStruct*>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<void (*)()>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<int (*)(int)>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<int (*)(int, int)>::value,
-              "IsMemberFunctionPointer");
+// A few standard types that definitely support printing.
+static_assert(internal::SupportsOstreamOperator<int>::value,
+              "ints should be printable");
+static_assert(internal::SupportsOstreamOperator<const char*>::value,
+              "C strings should be printable");
+static_assert(internal::SupportsOstreamOperator<std::string>::value,
+              "std::string should be printable");
 
-static_assert(is_member_function_pointer<void (AStruct::*)()>::value,
-              "IsMemberFunctionPointer");
-static_assert(is_member_function_pointer<void (AStruct::*)(int)>::value,
-              "IsMemberFunctionPointer");
-static_assert(is_member_function_pointer<int (AStruct::*)(int)>::value,
-              "IsMemberFunctionPointer");
-static_assert(is_member_function_pointer<int (AStruct::*)(int) const>::value,
-              "IsMemberFunctionPointer");
-static_assert(is_member_function_pointer<int (AStruct::*)(int, int)>::value,
-              "IsMemberFunctionPointer");
+// Various kinds of enums operator<< support.
+static_assert(internal::SupportsOstreamOperator<SimpleEnum>::value,
+              "simple enum should be printable by value");
+static_assert(internal::SupportsOstreamOperator<const SimpleEnum&>::value,
+              "simple enum should be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<EnumWithExplicitType>::value,
+              "enum with explicit type should be printable by value");
+static_assert(
+    internal::SupportsOstreamOperator<const EnumWithExplicitType&>::value,
+    "enum with explicit type should be printable by const ref");
+static_assert(!internal::SupportsOstreamOperator<ScopedEnum>::value,
+              "scoped enum should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const ScopedEnum&>::value,
+              "simple enum should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<ScopedEnumWithOperator>::value,
+              "scoped enum with operator<< should be printable by value");
+static_assert(
+    internal::SupportsOstreamOperator<const ScopedEnumWithOperator&>::value,
+    "scoped enum with operator<< should be printable by const ref");
+
+// operator<< support on structs.
+static_assert(!internal::SupportsOstreamOperator<SimpleStruct>::value,
+              "simple struct should not be printable by value");
+static_assert(!internal::SupportsOstreamOperator<const SimpleStruct&>::value,
+              "simple struct should not be printable by const ref");
+static_assert(internal::SupportsOstreamOperator<StructWithOperator>::value,
+              "struct with operator<< should be printable by value");
+static_assert(
+    internal::SupportsOstreamOperator<const StructWithOperator&>::value,
+    "struct with operator<< should be printable by const ref");
+
+// underlying type of enums
+static_assert(std::is_integral<underlying_type<SimpleEnum>::type>::value,
+              "simple enum must have some integral type");
+static_assert(
+    std::is_same<underlying_type<EnumWithExplicitType>::type, uint64_t>::value,
+    "explicit type must be detected");
+static_assert(std::is_same<underlying_type<ScopedEnum>::type, int>::value,
+              "scoped enum defaults to int");
+
+struct TriviallyDestructible {
+  int field;
+};
+
+class NonTriviallyDestructible {
+  ~NonTriviallyDestructible() {}
+};
+
+static_assert(is_trivially_destructible<int>::value, "IsTriviallyDestructible");
+static_assert(is_trivially_destructible<TriviallyDestructible>::value,
+              "IsTriviallyDestructible");
+static_assert(!is_trivially_destructible<NonTriviallyDestructible>::value,
+              "IsTriviallyDestructible");
 
 }  // namespace
 }  // namespace base
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index 463f343..51863a2 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -9,9 +9,8 @@
   import("//build/config/android/rules.gni")
 }
 
-source_set("test_config") {
-  # TODO http://crbug.com/412064 enable this flag all the time.
-  testonly = !is_component_build
+static_library("test_config") {
+  testonly = true
   sources = [
     "test_switches.cc",
     "test_switches.h",
@@ -24,10 +23,10 @@
 }
 
 # GYP: //base/base.gyp:test_support_base
-source_set("test_support") {
-  # TODO http://crbug.com/412064 enable this flag all the time.
-  testonly = !is_component_build
+static_library("test_support") {
+  testonly = true
   sources = [
+    "../trace_event/trace_config_memory_test_util.h",
     "gtest_util.cc",
     "gtest_util.h",
     "gtest_xml_unittest_result_printer.cc",
@@ -63,6 +62,8 @@
     "perf_time_logger.h",
     "power_monitor_test_base.cc",
     "power_monitor_test_base.h",
+    "scoped_command_line.cc",
+    "scoped_command_line.h",
     "scoped_locale.cc",
     "scoped_locale.h",
     "scoped_path_override.cc",
@@ -90,6 +91,8 @@
     "test_io_thread.h",
     "test_listener_ios.h",
     "test_listener_ios.mm",
+    "test_message_loop.cc",
+    "test_message_loop.h",
     "test_mock_time_task_runner.cc",
     "test_mock_time_task_runner.h",
     "test_pending_task.cc",
@@ -126,6 +129,8 @@
     sources += [
       "launcher/test_launcher.cc",
       "launcher/test_launcher.h",
+      "launcher/test_launcher_tracer.cc",
+      "launcher/test_launcher_tracer.h",
       "launcher/test_results_tracker.cc",
       "launcher/unit_test_launcher.cc",
       "multiprocess_test.cc",
@@ -168,6 +173,10 @@
     set_sources_assignment_filter(sources_assignment_filter)
   }
 
+  if (is_mac) {
+    libs = [ "AppKit.framework" ]
+  }
+
   if (is_android) {
     deps += [ ":base_unittests_jni_headers" ]
   }
@@ -208,6 +217,9 @@
   defines = [ "PERF_TEST" ]
 }
 
+# This is a source set instead of a static library because it seems like some
+# linkers get confused when "main" is in a static library, and if you link to
+# this, you always want the object file anyway.
 source_set("test_support_perf") {
   testonly = true
   sources = [
@@ -222,7 +234,7 @@
   public_configs = [ ":perf_test_config" ]
 }
 
-source_set("test_launcher_nacl_nonsfi") {
+static_library("test_launcher_nacl_nonsfi") {
   testonly = true
   sources = [
     "launcher/test_launcher_nacl_nonsfi.cc",
@@ -233,7 +245,7 @@
   ]
 }
 
-source_set("run_all_unittests") {
+static_library("run_all_unittests") {
   testonly = true
   sources = [
     "run_all_unittests.cc",
diff --git a/base/test/data/prefs/invalid.json b/base/test/data/prefs/invalid.json
deleted file mode 100644
index 43392a9..0000000
--- a/base/test/data/prefs/invalid.json
+++ /dev/null
@@ -1 +0,0 @@
-!@#$%^&
\ No newline at end of file
diff --git a/base/test/data/prefs/read.json b/base/test/data/prefs/read.json
deleted file mode 100644
index ea578a4..0000000
--- a/base/test/data/prefs/read.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "homepage": "http://www.cnn.com",
-  "some_directory": "/usr/local/",
-  "tabs": {
-    "new_windows_in_tabs": true,
-    "max_tabs": 20
-  }
-}
diff --git a/base/test/data/prefs/write.golden.json b/base/test/data/prefs/write.golden.json
deleted file mode 100644
index fb1fff1..0000000
--- a/base/test/data/prefs/write.golden.json
+++ /dev/null
@@ -1 +0,0 @@
-{"homepage":"http://www.cnn.com","long_int":{"pref":"214748364842"},"some_directory":"/usr/sbin/","tabs":{"max_tabs":10,"new_windows_in_tabs":false}}
\ No newline at end of file
diff --git a/base/test/ios/OWNERS b/base/test/ios/OWNERS
index 1b3348e..40a68c7 100644
--- a/base/test/ios/OWNERS
+++ b/base/test/ios/OWNERS
@@ -1,2 +1 @@
 rohitrao@chromium.org
-stuartmorgan@chromium.org
diff --git a/base/test/multiprocess_test.cc b/base/test/multiprocess_test.cc
index 6a1b7b4..de56e7f 100644
--- a/base/test/multiprocess_test.cc
+++ b/base/test/multiprocess_test.cc
@@ -26,7 +26,7 @@
 
   return LaunchProcess(command_line, options);
 }
-#endif  // !defined(OS_ANDROID)
+#endif  // !OS_ANDROID && !__ANDROID__ && !__ANDROID_HOST__
 
 CommandLine GetMultiProcessTestChildBaseCommandLine() {
   CommandLine cmd_line = *CommandLine::ForCurrentProcess();
diff --git a/base/test/multiprocess_test.h b/base/test/multiprocess_test.h
index ab1d0ca..ae4c3eb 100644
--- a/base/test/multiprocess_test.h
+++ b/base/test/multiprocess_test.h
@@ -66,6 +66,25 @@
 // may add any flags needed for your child process.
 CommandLine GetMultiProcessTestChildBaseCommandLine();
 
+#if defined(OS_ANDROID)
+
+// Enable the alternate test child implementation which support spawning a child
+// after threads have been created. If used, this MUST be the first line of
+// main(). The main function is passed in to avoid a link-time dependency in
+// component builds.
+void InitAndroidMultiProcessTestHelper(int (*main)(int, char**));
+
+// Returns true if the current process is a test child.
+bool AndroidIsChildProcess();
+
+// Wait for a test child to exit if the alternate test child implementation is
+// being used.
+bool AndroidWaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code)
+    WARN_UNUSED_RESULT;
+
+#endif  // defined(OS_ANDROID)
+
 // MultiProcessTest ------------------------------------------------------------
 
 // A MultiProcessTest is a test class which makes it easier to
diff --git a/base/test/multiprocess_test_android.cc b/base/test/multiprocess_test_android.cc
index dc489d1..f58b452 100644
--- a/base/test/multiprocess_test_android.cc
+++ b/base/test/multiprocess_test_android.cc
@@ -4,17 +4,391 @@
 
 #include "base/test/multiprocess_test.h"
 
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/socket.h>
 #include <unistd.h>
 
+#include <memory>
+#include <utility>
+#include <vector>
+
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/containers/hash_tables.h"
+#include "base/lazy_instance.h"
 #include "base/logging.h"
+#include "base/macros.h"
+#include "base/pickle.h"
 #include "base/posix/global_descriptors.h"
+#include "base/posix/unix_domain_socket_linux.h"
 #include "testing/multiprocess_func_list.h"
 
 namespace base {
 
+namespace {
+
+const int kMaxMessageSize = 1024 * 1024;
+const int kFragmentSize = 4096;
+
+// Message sent between parent process and helper child process.
+enum class MessageType : uint32_t {
+  START_REQUEST,
+  START_RESPONSE,
+  WAIT_REQUEST,
+  WAIT_RESPONSE,
+};
+
+struct MessageHeader {
+  uint32_t size;
+  MessageType type;
+};
+
+struct StartProcessRequest {
+  MessageHeader header =
+      {sizeof(StartProcessRequest), MessageType::START_REQUEST};
+
+  uint32_t num_args = 0;
+  uint32_t num_fds = 0;
+};
+
+struct StartProcessResponse {
+  MessageHeader header =
+      {sizeof(StartProcessResponse), MessageType::START_RESPONSE};
+
+  pid_t child_pid;
+};
+
+struct WaitProcessRequest {
+  MessageHeader header =
+      {sizeof(WaitProcessRequest), MessageType::WAIT_REQUEST};
+
+  pid_t pid;
+  uint64_t timeout_ms;
+};
+
+struct WaitProcessResponse {
+  MessageHeader header =
+      {sizeof(WaitProcessResponse), MessageType::WAIT_RESPONSE};
+
+  bool success = false;
+  int32_t exit_code = 0;
+};
+
+// Helper class that implements an alternate test child launcher for
+// multi-process tests. The default implementation doesn't work if the child is
+// launched after starting threads. However, for some tests (i.e. Mojo), this
+// is necessary. This implementation works around that issue by forking a helper
+// process very early in main(), before any real work is done. Then, when a
+// child needs to be spawned, a message is sent to that helper process, which
+// then forks and returns the result to the parent. The forked child then calls
+// main() and things look as though a brand new process has been fork/exec'd.
+class LaunchHelper {
+ public:
+  using MainFunction = int (*)(int, char**);
+
+  LaunchHelper() {}
+
+  // Initialise the alternate test child implementation.
+  void Init(MainFunction main);
+
+  // Starts a child test helper process.
+  Process StartChildTestHelper(const std::string& procname,
+                               const CommandLine& base_command_line,
+                               const LaunchOptions& options);
+
+  // Waits for a child test helper process.
+  bool WaitForChildExitWithTimeout(const Process& process, TimeDelta timeout,
+                                   int* exit_code);
+
+  bool IsReady() const { return child_fd_ != -1; }
+  bool IsChild() const { return is_child_; }
+
+ private:
+  // Wrappers around sendmsg/recvmsg that supports message fragmentation.
+  void Send(int fd, const MessageHeader* msg, const std::vector<int>& fds);
+  ssize_t Recv(int fd, void* buf, std::vector<ScopedFD>* fds);
+
+  // Parent process implementation.
+  void DoParent(int fd);
+  // Helper process implementation.
+  void DoHelper(int fd);
+
+  void StartProcessInHelper(const StartProcessRequest* request,
+                           std::vector<ScopedFD> fds);
+  void WaitForChildInHelper(const WaitProcessRequest* request);
+
+  bool is_child_ = false;
+
+  // Parent vars.
+  int child_fd_ = -1;
+
+  // Helper vars.
+  int parent_fd_ = -1;
+  MainFunction main_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(LaunchHelper);
+};
+
+void LaunchHelper::Init(MainFunction main) {
+  main_ = main;
+
+  // Create a communication channel between the parent and child launch helper.
+  // fd[0] belongs to the parent, fd[1] belongs to the child.
+  int fds[2] = {-1, -1};
+  int rv = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds);
+  PCHECK(rv == 0);
+  CHECK_NE(-1, fds[0]);
+  CHECK_NE(-1, fds[1]);
+
+  pid_t pid = fork();
+  PCHECK(pid >= 0) << "Fork failed";
+  if (pid) {
+    // Parent.
+    rv = close(fds[1]);
+    PCHECK(rv == 0);
+    DoParent(fds[0]);
+  } else {
+    // Helper.
+    rv = close(fds[0]);
+    PCHECK(rv == 0);
+    DoHelper(fds[1]);
+    NOTREACHED();
+    _exit(0);
+  }
+}
+
+void LaunchHelper::Send(
+    int fd, const MessageHeader* msg, const std::vector<int>& fds) {
+  uint32_t bytes_remaining = msg->size;
+  const char* buf = reinterpret_cast<const char*>(msg);
+  while (bytes_remaining) {
+    size_t send_size =
+        (bytes_remaining > kFragmentSize) ? kFragmentSize : bytes_remaining;
+    bool success = UnixDomainSocket::SendMsg(
+        fd, buf, send_size,
+        (bytes_remaining == msg->size) ? fds : std::vector<int>());
+    CHECK(success);
+    bytes_remaining -= send_size;
+    buf += send_size;
+  }
+}
+
+ssize_t LaunchHelper::Recv(int fd, void* buf, std::vector<ScopedFD>* fds) {
+  ssize_t size = UnixDomainSocket::RecvMsg(fd, buf, kFragmentSize, fds);
+  if (size <= 0)
+    return size;
+
+  const MessageHeader* header = reinterpret_cast<const MessageHeader*>(buf);
+  CHECK(header->size < kMaxMessageSize);
+  uint32_t bytes_remaining = header->size - size;
+  char* buffer = reinterpret_cast<char*>(buf);
+  buffer += size;
+  while (bytes_remaining) {
+    std::vector<ScopedFD> dummy_fds;
+    size = UnixDomainSocket::RecvMsg(fd, buffer, kFragmentSize, &dummy_fds);
+    if (size <= 0)
+      return size;
+
+    CHECK(dummy_fds.empty());
+    CHECK(size == kFragmentSize ||
+          static_cast<size_t>(size) == bytes_remaining);
+    bytes_remaining -= size;
+    buffer += size;
+  }
+  return header->size;
+}
+
+void LaunchHelper::DoParent(int fd) {
+  child_fd_ = fd;
+}
+
+void LaunchHelper::DoHelper(int fd) {
+  parent_fd_ = fd;
+  is_child_ = true;
+  std::unique_ptr<char[]> buf(new char[kMaxMessageSize]);
+  while (true) {
+    // Wait for a message from the parent.
+    std::vector<ScopedFD> fds;
+    ssize_t size = Recv(parent_fd_, buf.get(), &fds);
+    if (size == 0 || (size < 0 && errno == ECONNRESET)) {
+      _exit(0);
+    }
+    PCHECK(size > 0);
+
+    const MessageHeader* header =
+        reinterpret_cast<const MessageHeader*>(buf.get());
+    CHECK_EQ(static_cast<ssize_t>(header->size), size);
+    switch (header->type) {
+      case MessageType::START_REQUEST:
+        StartProcessInHelper(
+            reinterpret_cast<const StartProcessRequest*>(buf.get()),
+            std::move(fds));
+        break;
+      case MessageType::WAIT_REQUEST:
+        WaitForChildInHelper(
+            reinterpret_cast<const WaitProcessRequest*>(buf.get()));
+        break;
+      default:
+        LOG(FATAL) << "Unsupported message type: "
+                   << static_cast<uint32_t>(header->type);
+    }
+  }
+}
+
+void LaunchHelper::StartProcessInHelper(const StartProcessRequest* request,
+                                        std::vector<ScopedFD> fds) {
+  pid_t pid = fork();
+  PCHECK(pid >= 0) << "Fork failed";
+  if (pid) {
+    // Helper.
+    StartProcessResponse resp;
+    resp.child_pid = pid;
+    Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
+         std::vector<int>());
+  } else {
+    // Child.
+    PCHECK(close(parent_fd_) == 0);
+    parent_fd_ = -1;
+    CommandLine::Reset();
+
+    Pickle serialised_extra(reinterpret_cast<const char*>(request + 1),
+                            request->header.size - sizeof(StartProcessRequest));
+    PickleIterator iter(serialised_extra);
+    std::vector<std::string> args;
+    for (size_t i = 0; i < request->num_args; i++) {
+      std::string arg;
+      CHECK(iter.ReadString(&arg));
+      args.push_back(std::move(arg));
+    }
+
+    CHECK_EQ(request->num_fds, fds.size());
+    for (size_t i = 0; i < request->num_fds; i++) {
+      int new_fd;
+      CHECK(iter.ReadInt(&new_fd));
+      int old_fd = fds[i].release();
+      if (new_fd != old_fd) {
+        if (dup2(old_fd, new_fd) < 0) {
+          PLOG(FATAL) << "dup2";
+        }
+        PCHECK(close(old_fd) == 0);
+      }
+    }
+
+    // argv has argc+1 elements, where the last element is NULL.
+    std::unique_ptr<char*[]> argv(new char*[args.size() + 1]);
+    for (size_t i = 0; i < args.size(); i++) {
+      argv[i] = const_cast<char*>(args[i].c_str());
+    }
+    argv[args.size()] = nullptr;
+    _exit(main_(args.size(), argv.get()));
+    NOTREACHED();
+  }
+}
+
+void LaunchHelper::WaitForChildInHelper(const WaitProcessRequest* request) {
+  Process process(request->pid);
+  TimeDelta timeout = TimeDelta::FromMilliseconds(request->timeout_ms);
+  int exit_code = -1;
+  bool success = process.WaitForExitWithTimeout(timeout, &exit_code);
+
+  WaitProcessResponse resp;
+  resp.exit_code = exit_code;
+  resp.success = success;
+  Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
+       std::vector<int>());
+}
+
+Process LaunchHelper::StartChildTestHelper(const std::string& procname,
+                                           const CommandLine& base_command_line,
+                                           const LaunchOptions& options) {
+
+  CommandLine command_line(base_command_line);
+  if (!command_line.HasSwitch(switches::kTestChildProcess))
+    command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+
+  StartProcessRequest request;
+  Pickle serialised_extra;
+  const CommandLine::StringVector& argv = command_line.argv();
+  for (const auto& arg : argv)
+    CHECK(serialised_extra.WriteString(arg));
+  request.num_args = argv.size();
+
+  std::vector<int> fds_to_send;
+  if (options.fds_to_remap) {
+    for (auto p : *options.fds_to_remap) {
+      CHECK(serialised_extra.WriteInt(p.second));
+      fds_to_send.push_back(p.first);
+    }
+    request.num_fds = options.fds_to_remap->size();
+  }
+
+  size_t buf_size = sizeof(StartProcessRequest) + serialised_extra.size();
+  request.header.size = buf_size;
+  std::unique_ptr<char[]> buffer(new char[buf_size]);
+  memcpy(buffer.get(), &request, sizeof(StartProcessRequest));
+  memcpy(buffer.get() + sizeof(StartProcessRequest), serialised_extra.data(),
+         serialised_extra.size());
+
+  // Send start message.
+  Send(child_fd_, reinterpret_cast<const MessageHeader*>(buffer.get()),
+       fds_to_send);
+
+  // Synchronously get response.
+  StartProcessResponse response;
+  std::vector<ScopedFD> recv_fds;
+  ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
+  PCHECK(resp_size == sizeof(StartProcessResponse));
+
+  return Process(response.child_pid);
+}
+
+bool LaunchHelper::WaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code) {
+
+  WaitProcessRequest request;
+  request.pid = process.Handle();
+  request.timeout_ms = timeout.InMilliseconds();
+
+  Send(child_fd_, reinterpret_cast<const MessageHeader*>(&request),
+       std::vector<int>());
+
+  WaitProcessResponse response;
+  std::vector<ScopedFD> recv_fds;
+  ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
+  PCHECK(resp_size == sizeof(WaitProcessResponse));
+
+  if (!response.success)
+    return false;
+
+  *exit_code = response.exit_code;
+  return true;
+}
+
+LazyInstance<LaunchHelper>::Leaky g_launch_helper;
+
+}  // namespace
+
+void InitAndroidMultiProcessTestHelper(int (*main)(int, char**)) {
+  DCHECK(main);
+  // Don't allow child processes to themselves create new child processes.
+  if (g_launch_helper.Get().IsChild())
+    return;
+  g_launch_helper.Get().Init(main);
+}
+
+bool AndroidIsChildProcess() {
+  return g_launch_helper.Get().IsChild();
+}
+
+bool AndroidWaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code) {
+  CHECK(g_launch_helper.Get().IsReady());
+  return g_launch_helper.Get().WaitForChildExitWithTimeout(
+      process, timeout, exit_code);
+}
+
 // A very basic implementation for Android. On Android tests can run in an APK
 // and we don't have an executable to exec*. This implementation does the bare
 // minimum to execute the method specified by procname (in the child process).
@@ -22,6 +396,11 @@
 Process SpawnMultiProcessTestChild(const std::string& procname,
                                    const CommandLine& base_command_line,
                                    const LaunchOptions& options) {
+  if (g_launch_helper.Get().IsReady()) {
+    return g_launch_helper.Get().StartChildTestHelper(
+        procname, base_command_line, options);
+  }
+
   // TODO(viettrungluu): The FD-remapping done below is wrong in the presence of
   // cycles (e.g., fd1 -> fd2, fd2 -> fd1). crbug.com/326576
   FileHandleMappingVector empty;
diff --git a/base/test/sequenced_worker_pool_owner.cc b/base/test/sequenced_worker_pool_owner.cc
index 37bad2b..8781495 100644
--- a/base/test/sequenced_worker_pool_owner.cc
+++ b/base/test/sequenced_worker_pool_owner.cc
@@ -54,7 +54,8 @@
 }
 
 void SequencedWorkerPoolOwner::OnDestruct() {
-  constructor_message_loop_->PostTask(FROM_HERE, exit_loop_.QuitClosure());
+  constructor_message_loop_->task_runner()->PostTask(FROM_HERE,
+                                                     exit_loop_.QuitClosure());
 }
 
 }  // namespace base
diff --git a/base/test/simple_test_tick_clock.cc b/base/test/simple_test_tick_clock.cc
index 1b4696f..c6375bd 100644
--- a/base/test/simple_test_tick_clock.cc
+++ b/base/test/simple_test_tick_clock.cc
@@ -23,4 +23,9 @@
   now_ticks_ += delta;
 }
 
+void SimpleTestTickClock::SetNowTicks(TimeTicks ticks) {
+  AutoLock lock(lock_);
+  now_ticks_ = ticks;
+}
+
 }  // namespace base
diff --git a/base/test/simple_test_tick_clock.h b/base/test/simple_test_tick_clock.h
index aebdebc..f2f7581 100644
--- a/base/test/simple_test_tick_clock.h
+++ b/base/test/simple_test_tick_clock.h
@@ -26,6 +26,9 @@
   // Advances the clock by |delta|, which must not be negative.
   void Advance(TimeDelta delta);
 
+  // Sets the clock to the given time.
+  void SetNowTicks(TimeTicks ticks);
+
  private:
   // Protects |now_ticks_|.
   Lock lock_;
diff --git a/base/test/test_io_thread.cc b/base/test/test_io_thread.cc
index 48c1e16..1fa0412 100644
--- a/base/test/test_io_thread.cc
+++ b/base/test/test_io_thread.cc
@@ -56,7 +56,8 @@
 
 void TestIOThread::PostTaskAndWait(const tracked_objects::Location& from_here,
                                    const base::Closure& task) {
-  base::WaitableEvent event(false, false);
+  base::WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                            WaitableEvent::InitialState::NOT_SIGNALED);
   task_runner()->PostTask(from_here,
                           base::Bind(&PostTaskAndWaitHelper, &event, task));
   event.Wait();
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index 3f2c79d..87b107e 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -22,6 +22,8 @@
       delay(delay),
       nestability(nestability) {}
 
+TestPendingTask::TestPendingTask(const TestPendingTask& other) = default;
+
 TimeTicks TestPendingTask::GetTimeToRun() const {
   return post_time + delay;
 }
@@ -50,12 +52,12 @@
   state->SetInteger("delay", delay.ToInternalValue());
 }
 
-scoped_refptr<base::trace_event::ConvertableToTraceFormat>
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
 TestPendingTask::AsValue() const {
-  scoped_refptr<base::trace_event::TracedValue> state =
-      new base::trace_event::TracedValue();
+  std::unique_ptr<base::trace_event::TracedValue> state(
+      new base::trace_event::TracedValue());
   AsValueInto(state.get());
-  return state;
+  return std::move(state);
 }
 
 std::string TestPendingTask::ToString() const {
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 829baa6..2dbdb7e 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -21,6 +21,7 @@
   enum TestNestability { NESTABLE, NON_NESTABLE };
 
   TestPendingTask();
+  TestPendingTask(const TestPendingTask& other);
   TestPendingTask(const tracked_objects::Location& location,
                   const Closure& task,
                   TimeTicks post_time,
@@ -58,7 +59,7 @@
   // Functions for using test pending task with tracing, useful in unit
   // testing.
   void AsValueInto(base::trace_event::TracedValue* state) const;
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
   std::string ToString() const;
 };
 
diff --git a/base/test/test_switches.cc b/base/test/test_switches.cc
index 40f20d7..817a38e 100644
--- a/base/test/test_switches.cc
+++ b/base/test/test_switches.cc
@@ -62,6 +62,10 @@
 
 // Time (in milliseconds) that the tests should wait before timing out.
 const char switches::kTestLauncherTimeout[] = "test-launcher-timeout";
+
+// Path where to save a trace of test launcher's execution.
+const char switches::kTestLauncherTrace[] = "test-launcher-trace";
+
 // TODO(phajdan.jr): Clean up the switch names.
 const char switches::kTestTinyTimeout[] = "test-tiny-timeout";
 const char switches::kUiTestActionTimeout[] = "ui-test-action-timeout";
diff --git a/base/test/test_switches.h b/base/test/test_switches.h
index 419b755..88ef0ce 100644
--- a/base/test/test_switches.h
+++ b/base/test/test_switches.h
@@ -24,6 +24,7 @@
 extern const char kTestLauncherShardIndex[];
 extern const char kTestLauncherTotalShards[];
 extern const char kTestLauncherTimeout[];
+extern const char kTestLauncherTrace[];
 extern const char kTestTinyTimeout[];
 extern const char kUiTestActionTimeout[];
 extern const char kUiTestActionMaxTimeout[];
diff --git a/base/test/trace_event_analyzer.cc b/base/test/trace_event_analyzer.cc
index 2046355..64436dc 100644
--- a/base/test/trace_event_analyzer.cc
+++ b/base/test/trace_event_analyzer.cc
@@ -7,10 +7,10 @@
 #include <math.h>
 
 #include <algorithm>
+#include <memory>
 #include <set>
 
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/pattern.h"
 #include "base/values.h"
 
@@ -26,9 +26,13 @@
       other_event(NULL) {
 }
 
+TraceEvent::TraceEvent(TraceEvent&& other) = default;
+
 TraceEvent::~TraceEvent() {
 }
 
+TraceEvent& TraceEvent::operator=(TraceEvent&& rhs) = default;
+
 bool TraceEvent::SetFromJSON(const base::Value* event_value) {
   if (event_value->GetType() != base::Value::TYPE_DICTIONARY) {
     LOG(ERROR) << "Value must be TYPE_DICTIONARY";
@@ -52,6 +56,12 @@
   bool require_id = (phase == TRACE_EVENT_PHASE_ASYNC_BEGIN ||
                      phase == TRACE_EVENT_PHASE_ASYNC_STEP_INTO ||
                      phase == TRACE_EVENT_PHASE_ASYNC_STEP_PAST ||
+                     phase == TRACE_EVENT_PHASE_MEMORY_DUMP ||
+                     phase == TRACE_EVENT_PHASE_ENTER_CONTEXT ||
+                     phase == TRACE_EVENT_PHASE_LEAVE_CONTEXT ||
+                     phase == TRACE_EVENT_PHASE_CREATE_OBJECT ||
+                     phase == TRACE_EVENT_PHASE_DELETE_OBJECT ||
+                     phase == TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ||
                      phase == TRACE_EVENT_PHASE_ASYNC_END);
 
   if (require_origin && !dictionary->GetInteger("pid", &thread.process_id)) {
@@ -101,11 +111,9 @@
       arg_numbers[it.key()] = static_cast<double>(boolean ? 1 : 0);
     } else if (it.value().GetAsDouble(&double_num)) {
       arg_numbers[it.key()] = double_num;
-    } else {
-      LOG(WARNING) << "Value type of argument is not supported: " <<
-          static_cast<int>(it.value().GetType());
-      continue;  // Skip non-supported arguments.
     }
+    // Record all arguments as values.
+    arg_values[it.key()] = it.value().CreateDeepCopy();
   }
 
   return true;
@@ -117,9 +125,9 @@
 
 bool TraceEvent::GetArgAsString(const std::string& name,
                                 std::string* arg) const {
-  std::map<std::string, std::string>::const_iterator i = arg_strings.find(name);
-  if (i != arg_strings.end()) {
-    *arg = i->second;
+  const auto it = arg_strings.find(name);
+  if (it != arg_strings.end()) {
+    *arg = it->second;
     return true;
   }
   return false;
@@ -127,9 +135,19 @@
 
 bool TraceEvent::GetArgAsNumber(const std::string& name,
                                 double* arg) const {
-  std::map<std::string, double>::const_iterator i = arg_numbers.find(name);
-  if (i != arg_numbers.end()) {
-    *arg = i->second;
+  const auto it = arg_numbers.find(name);
+  if (it != arg_numbers.end()) {
+    *arg = it->second;
+    return true;
+  }
+  return false;
+}
+
+bool TraceEvent::GetArgAsValue(const std::string& name,
+                               std::unique_ptr<base::Value>* arg) const {
+  const auto it = arg_values.find(name);
+  if (it != arg_values.end()) {
+    *arg = it->second->CreateDeepCopy();
     return true;
   }
   return false;
@@ -143,6 +161,10 @@
   return (arg_numbers.find(name) != arg_numbers.end());
 }
 
+bool TraceEvent::HasArg(const std::string& name) const {
+  return (arg_values.find(name) != arg_values.end());
+}
+
 std::string TraceEvent::GetKnownArgAsString(const std::string& name) const {
   std::string arg_string;
   bool result = GetArgAsString(name, &arg_string);
@@ -171,6 +193,14 @@
   return (arg_double != 0.0);
 }
 
+std::unique_ptr<base::Value> TraceEvent::GetKnownArgAsValue(
+    const std::string& name) const {
+  std::unique_ptr<base::Value> arg_value;
+  bool result = GetArgAsValue(name, &arg_value);
+  DCHECK(result);
+  return arg_value;
+}
+
 // QueryNode
 
 QueryNode::QueryNode(const Query& query) : query_(query) {
@@ -649,7 +679,7 @@
 
 bool ParseEventsFromJson(const std::string& json,
                          std::vector<TraceEvent>* output) {
-  scoped_ptr<base::Value> root = base::JSONReader::Read(json);
+  std::unique_ptr<base::Value> root = base::JSONReader::Read(json);
 
   base::ListValue* root_list = NULL;
   if (!root.get() || !root->GetAsList(&root_list))
@@ -660,7 +690,7 @@
     if (root_list->Get(i, &item)) {
       TraceEvent event;
       if (event.SetFromJSON(item))
-        output->push_back(event);
+        output->push_back(std::move(event));
       else
         return false;
     }
@@ -682,7 +712,7 @@
 
 // static
 TraceAnalyzer* TraceAnalyzer::Create(const std::string& json_events) {
-  scoped_ptr<TraceAnalyzer> analyzer(new TraceAnalyzer());
+  std::unique_ptr<TraceAnalyzer> analyzer(new TraceAnalyzer());
   if (analyzer->SetEvents(json_events))
     return analyzer.release();
   return NULL;
diff --git a/base/test/trace_event_analyzer.h b/base/test/trace_event_analyzer.h
index f67445a..0e2366b 100644
--- a/base/test/trace_event_analyzer.h
+++ b/base/test/trace_event_analyzer.h
@@ -111,6 +111,7 @@
   };
 
   TraceEvent();
+  TraceEvent(TraceEvent&& other);
   ~TraceEvent();
 
   bool SetFromJSON(const base::Value* event_value) WARN_UNUSED_RESULT;
@@ -119,6 +120,8 @@
     return timestamp < rhs.timestamp;
   }
 
+  TraceEvent& operator=(TraceEvent&& rhs);
+
   bool has_other_event() const { return other_event; }
 
   // Returns absolute duration in microseconds between this event and other
@@ -130,11 +133,16 @@
   bool GetArgAsString(const std::string& name, std::string* arg) const;
   // Return the argument value if it exists and it is a number.
   bool GetArgAsNumber(const std::string& name, double* arg) const;
+  // Return the argument value if it exists.
+  bool GetArgAsValue(const std::string& name,
+                     std::unique_ptr<base::Value>* arg) const;
 
   // Check if argument exists and is string.
   bool HasStringArg(const std::string& name) const;
   // Check if argument exists and is number (double, int or bool).
   bool HasNumberArg(const std::string& name) const;
+  // Check if argument exists.
+  bool HasArg(const std::string& name) const;
 
   // Get known existing arguments as specific types.
   // Useful when you have already queried the argument with
@@ -143,6 +151,8 @@
   double GetKnownArgAsDouble(const std::string& name) const;
   int GetKnownArgAsInt(const std::string& name) const;
   bool GetKnownArgAsBool(const std::string& name) const;
+  std::unique_ptr<base::Value> GetKnownArgAsValue(
+      const std::string& name) const;
 
   // Process ID and Thread ID.
   ProcessThreadID thread;
@@ -150,22 +160,17 @@
   // Time since epoch in microseconds.
   // Stored as double to match its JSON representation.
   double timestamp;
-
   double duration;
-
   char phase;
-
   std::string category;
-
   std::string name;
-
   std::string id;
 
   // All numbers and bool values from TraceEvent args are cast to double.
   // bool becomes 1.0 (true) or 0.0 (false).
   std::map<std::string, double> arg_numbers;
-
   std::map<std::string, std::string> arg_strings;
+  std::map<std::string, std::unique_ptr<base::Value>> arg_values;
 
   // The other event associated with this event (or NULL).
   const TraceEvent* other_event;
diff --git a/base/test/trace_event_analyzer_unittest.cc b/base/test/trace_event_analyzer_unittest.cc
index 700b920..086cfc9 100644
--- a/base/test/trace_event_analyzer_unittest.cc
+++ b/base/test/trace_event_analyzer_unittest.cc
@@ -2,14 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/test/trace_event_analyzer.h"
+
 #include <stddef.h>
 #include <stdint.h>
 
 #include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/test/trace_event_analyzer.h"
 #include "base/threading/platform_thread.h"
 #include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event_argument.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -56,7 +60,9 @@
 
 void TraceEventAnalyzerTest::EndTracing() {
   base::trace_event::TraceLog::GetInstance()->SetDisabled();
-  base::WaitableEvent flush_complete_event(false, false);
+  base::WaitableEvent flush_complete_event(
+      base::WaitableEvent::ResetPolicy::AUTOMATIC,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
   base::trace_event::TraceLog::GetInstance()->Flush(
       base::Bind(&TraceEventAnalyzerTest::OnTraceDataCollected,
                  base::Unretained(this),
@@ -74,8 +80,8 @@
   buffer_.Start();
   buffer_.Finish();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
 
   // Search for all events and verify that nothing is returned.
@@ -97,6 +103,7 @@
   event.arg_numbers["int"] = static_cast<double>(int_num);
   event.arg_numbers["double"] = double_num;
   event.arg_strings["string"] = str;
+  event.arg_values["dict"] = WrapUnique(new base::DictionaryValue());
 
   ASSERT_TRUE(event.HasNumberArg("false"));
   ASSERT_TRUE(event.HasNumberArg("true"));
@@ -105,12 +112,18 @@
   ASSERT_TRUE(event.HasStringArg("string"));
   ASSERT_FALSE(event.HasNumberArg("notfound"));
   ASSERT_FALSE(event.HasStringArg("notfound"));
+  ASSERT_TRUE(event.HasArg("dict"));
+  ASSERT_FALSE(event.HasArg("notfound"));
 
   EXPECT_FALSE(event.GetKnownArgAsBool("false"));
   EXPECT_TRUE(event.GetKnownArgAsBool("true"));
   EXPECT_EQ(int_num, event.GetKnownArgAsInt("int"));
   EXPECT_EQ(double_num, event.GetKnownArgAsDouble("double"));
   EXPECT_STREQ(str, event.GetKnownArgAsString("string").c_str());
+
+  std::unique_ptr<base::Value> arg;
+  EXPECT_TRUE(event.GetArgAsValue("dict", &arg));
+  EXPECT_EQ(base::Value::TYPE_DICTIONARY, arg->GetType());
 }
 
 TEST_F(TraceEventAnalyzerTest, QueryEventMember) {
@@ -226,8 +239,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer);
   analyzer->SetIgnoreMetadataEvents(true);
 
@@ -317,8 +330,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
 
   TraceEventVector found;
@@ -372,8 +385,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->SetIgnoreMetadataEvents(true);
 
@@ -422,8 +435,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateBeginEndEvents();
 
@@ -464,8 +477,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateBeginEndEvents();
 
@@ -496,8 +509,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateBeginEndEvents();
 
@@ -519,8 +532,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateBeginEndEvents();
 
@@ -552,8 +565,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateAsyncBeginEndEvents();
 
@@ -584,8 +597,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateAsyncBeginEndEvents();
 
@@ -637,8 +650,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
 
   // begin, end, and match queries to find proper begin/end pairs.
@@ -711,8 +724,7 @@
   std::vector<TraceEvent> events;
   events.reserve(100);
   TraceEventVector event_ptrs;
-  TraceEvent event;
-  event.timestamp = 0.0;
+  double timestamp = 0.0;
   double little_delta = 1.0;
   double big_delta = 10.0;
   double tiny_delta = 0.1;
@@ -721,8 +733,10 @@
 
   // Insert 10 events, each apart by little_delta.
   for (int i = 0; i < 10; ++i) {
-    event.timestamp += little_delta;
-    events.push_back(event);
+    timestamp += little_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
     event_ptrs.push_back(&events.back());
   }
 
@@ -733,9 +747,13 @@
   EXPECT_EQ(0.0, stats.standard_deviation_us);
 
   // Add an event apart by big_delta.
-  event.timestamp += big_delta;
-  events.push_back(event);
-  event_ptrs.push_back(&events.back());
+  {
+    timestamp += big_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
 
   ASSERT_TRUE(GetRateStats(event_ptrs, &stats, NULL));
   EXPECT_LT(little_delta, stats.mean_us);
@@ -753,9 +771,13 @@
   EXPECT_EQ(0.0, stats.standard_deviation_us);
 
   // Add an event apart by tiny_delta.
-  event.timestamp += tiny_delta;
-  events.push_back(event);
-  event_ptrs.push_back(&events.back());
+  {
+    timestamp += tiny_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
 
   // Trim off both the biggest and tiniest delta and verify stats.
   options.trim_min = 1;
@@ -767,17 +789,20 @@
   EXPECT_EQ(0.0, stats.standard_deviation_us);
 
   // Verify smallest allowed number of events.
-  TraceEventVector few_event_ptrs;
-  few_event_ptrs.push_back(&event);
-  few_event_ptrs.push_back(&event);
-  ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, NULL));
-  few_event_ptrs.push_back(&event);
-  ASSERT_TRUE(GetRateStats(few_event_ptrs, &stats, NULL));
+  {
+    TraceEvent event;
+    TraceEventVector few_event_ptrs;
+    few_event_ptrs.push_back(&event);
+    few_event_ptrs.push_back(&event);
+    ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, NULL));
+    few_event_ptrs.push_back(&event);
+    ASSERT_TRUE(GetRateStats(few_event_ptrs, &stats, NULL));
 
-  // Trim off more than allowed and verify failure.
-  options.trim_min = 0;
-  options.trim_max = 1;
-  ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, &options));
+    // Trim off more than allowed and verify failure.
+    options.trim_min = 0;
+    options.trim_max = 1;
+    ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, &options));
+  }
 }
 
 // Test FindFirstOf and FindLastOf.
@@ -894,5 +919,37 @@
   EXPECT_EQ(num_named, CountMatches(event_ptrs, query_named));
 }
 
+TEST_F(TraceEventAnalyzerTest, ComplexArgument) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    std::unique_ptr<base::trace_event::TracedValue> value(
+        new base::trace_event::TracedValue);
+    value->SetString("property", "value");
+    TRACE_EVENT1("cat", "name", "arg", std::move(value));
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  TraceEventVector events;
+  analyzer->FindEvents(Query::EventName() == Query::String("name"), &events);
+
+  EXPECT_EQ(1u, events.size());
+  EXPECT_EQ("cat", events[0]->category);
+  EXPECT_EQ("name", events[0]->name);
+  EXPECT_TRUE(events[0]->HasArg("arg"));
+
+  std::unique_ptr<base::Value> arg;
+  events[0]->GetArgAsValue("arg", &arg);
+  base::DictionaryValue* arg_dict;
+  EXPECT_TRUE(arg->GetAsDictionary(&arg_dict));
+  std::string property;
+  EXPECT_TRUE(arg_dict->GetString("property", &property));
+  EXPECT_EQ("value", property);
+}
 
 }  // namespace trace_analyzer
diff --git a/base/third_party/dynamic_annotations/dynamic_annotations.h b/base/third_party/dynamic_annotations/dynamic_annotations.h
new file mode 100644
index 0000000..8d7f052
--- /dev/null
+++ b/base/third_party/dynamic_annotations/dynamic_annotations.h
@@ -0,0 +1,595 @@
+/* Copyright (c) 2011, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file defines dynamic annotations for use with dynamic analysis
+   tool such as valgrind, PIN, etc.
+
+   Dynamic annotation is a source code annotation that affects
+   the generated code (that is, the annotation is not a comment).
+   Each such annotation is attached to a particular
+   instruction and/or to a particular object (address) in the program.
+
+   The annotations that should be used by users are macros in all upper-case
+   (e.g., ANNOTATE_NEW_MEMORY).
+
+   Actual implementation of these macros may differ depending on the
+   dynamic analysis tool being used.
+
+   See http://code.google.com/p/data-race-test/  for more information.
+
+   This file supports the following dynamic analysis tools:
+   - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
+      Macros are defined empty.
+   - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
+      Macros are defined as calls to non-inlinable empty functions
+      that are intercepted by Valgrind. */
+
+#ifndef __DYNAMIC_ANNOTATIONS_H__
+#define __DYNAMIC_ANNOTATIONS_H__
+
+#ifndef DYNAMIC_ANNOTATIONS_PREFIX
+# define DYNAMIC_ANNOTATIONS_PREFIX
+#endif
+
+#ifndef DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND
+# define DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND 1
+#endif
+
+#ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK
+# ifdef __GNUC__
+#  define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+# else
+/* TODO(glider): for Windows support we may want to change this macro in order
+   to prepend __declspec(selectany) to the annotations' declarations. */
+#  error weak annotations are not supported for your compiler
+# endif
+#else
+# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+#endif
+
+/* The following preprocessor magic prepends the value of
+   DYNAMIC_ANNOTATIONS_PREFIX to annotation function names. */
+#define DYNAMIC_ANNOTATIONS_GLUE0(A, B) A##B
+#define DYNAMIC_ANNOTATIONS_GLUE(A, B) DYNAMIC_ANNOTATIONS_GLUE0(A, B)
+#define DYNAMIC_ANNOTATIONS_NAME(name) \
+  DYNAMIC_ANNOTATIONS_GLUE(DYNAMIC_ANNOTATIONS_PREFIX, name)
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+# define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing condition variables such as CondVar,
+     using conditional critical sections (Await/LockWhen) and when constructing
+     user-defined synchronization mechanisms.
+
+     The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
+     be used to define happens-before arcs in user-defined synchronization
+     mechanisms:  the race detector will infer an arc from the former to the
+     latter when they share the same argument pointer.
+
+     Example 1 (reference counting):
+
+     void Unref() {
+       ANNOTATE_HAPPENS_BEFORE(&refcount_);
+       if (AtomicDecrementByOne(&refcount_) == 0) {
+         ANNOTATE_HAPPENS_AFTER(&refcount_);
+         delete this;
+       }
+     }
+
+     Example 2 (message queue):
+
+     void MyQueue::Put(Type *e) {
+       MutexLock lock(&mu_);
+       ANNOTATE_HAPPENS_BEFORE(e);
+       PutElementIntoMyQueue(e);
+     }
+
+     Type *MyQueue::Get() {
+       MutexLock lock(&mu_);
+       Type *e = GetElementFromMyQueue();
+       ANNOTATE_HAPPENS_AFTER(e);
+       return e;
+     }
+
+     Note: when possible, please use the existing reference counting and message
+     queue implementations instead of inventing new ones. */
+
+  /* Report that wait on the condition variable at address "cv" has succeeded
+     and the lock at address "lock" is held. */
+  #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, lock)
+
+  /* Report that wait on the condition variable at "cv" has succeeded.  Variant
+     w/o lock. */
+  #define ANNOTATE_CONDVAR_WAIT(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, NULL)
+
+  /* Report that we are about to signal on the condition variable at address
+     "cv". */
+  #define ANNOTATE_CONDVAR_SIGNAL(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(__FILE__, __LINE__, cv)
+
+  /* Report that we are about to signal_all on the condition variable at address
+     "cv". */
+  #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(__FILE__, __LINE__, cv)
+
+  /* Annotations for user-defined synchronization mechanisms. */
+  #define ANNOTATE_HAPPENS_BEFORE(obj) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(__FILE__, __LINE__, obj)
+  #define ANNOTATE_HAPPENS_AFTER(obj) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(__FILE__, __LINE__, obj)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(__FILE__, __LINE__, \
+        pointer, size)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(__FILE__, __LINE__, \
+        pointer, size)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size)   \
+    do {                                              \
+      ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
+      ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size);   \
+    } while (0)
+
+  /* Instruct the tool to create a happens-before arc between mu->Unlock() and
+     mu->Lock(). This annotation may slow down the race detector and hide real
+     races. Normally it is used only when it would be difficult to annotate each
+     of the mutex's critical sections individually using the annotations above.
+     This annotation makes sense only for hybrid race detectors. For pure
+     happens-before detectors this is a no-op. For more details see
+     http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
+  #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+        mu)
+
+  /* Opposite to ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX.
+     Instruct the tool to NOT create h-b arcs between Unlock and Lock, even in
+     pure happens-before mode. For a hybrid mode this is a no-op. */
+  #define ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(__FILE__, __LINE__, mu)
+
+  /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
+  #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+        mu)
+
+  /* -------------------------------------------------------------
+     Annotations useful when defining memory allocators, or when memory that
+     was protected in one way starts to be protected in another. */
+
+  /* Report that a new memory at "address" of size "size" has been allocated.
+     This might be used when the memory has been retrieved from a free list and
+     is about to be reused, or when the locking discipline for a variable
+     changes. */
+  #define ANNOTATE_NEW_MEMORY(address, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(__FILE__, __LINE__, address, \
+        size)
+
+  /* -------------------------------------------------------------
+     Annotations useful when defining FIFO queues that transfer data between
+     threads. */
+
+  /* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
+     address "pcq" has been created.  The ANNOTATE_PCQ_* annotations
+     should be used only for FIFO queues.  For non-FIFO queues use
+     ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
+  #define ANNOTATE_PCQ_CREATE(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(__FILE__, __LINE__, pcq)
+
+  /* Report that the queue at address "pcq" is about to be destroyed. */
+  #define ANNOTATE_PCQ_DESTROY(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(__FILE__, __LINE__, pcq)
+
+  /* Report that we are about to put an element into a FIFO queue at address
+     "pcq". */
+  #define ANNOTATE_PCQ_PUT(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(__FILE__, __LINE__, pcq)
+
+  /* Report that we've just got an element from a FIFO queue at address
+     "pcq". */
+  #define ANNOTATE_PCQ_GET(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(__FILE__, __LINE__, pcq)
+
+  /* -------------------------------------------------------------
+     Annotations that suppress errors.  It is usually better to express the
+     program's synchronization using the other annotations, but these can
+     be used when all else fails. */
+
+  /* Report that we may have a benign race at "pointer", with size
+     "sizeof(*(pointer))". "pointer" must be a non-void* pointer.  Insert at the
+     point where "pointer" has been allocated, preferably close to the point
+     where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC. */
+  #define ANNOTATE_BENIGN_RACE(pointer, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+        pointer, sizeof(*(pointer)), description)
+
+  /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
+     the memory range [address, address+size). */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+        address, size, description)
+
+  /* Request the analysis tool to ignore all reads in the current thread
+     until ANNOTATE_IGNORE_READS_END is called.
+     Useful to ignore intentional racey reads, while still checking
+     other reads and all writes.
+     See also ANNOTATE_UNPROTECTED_READ. */
+  #define ANNOTATE_IGNORE_READS_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring reads. */
+  #define ANNOTATE_IGNORE_READS_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+  /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring writes. */
+  #define ANNOTATE_IGNORE_WRITES_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+  /* Start ignoring all memory accesses (reads and writes). */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+    do {\
+      ANNOTATE_IGNORE_READS_BEGIN();\
+      ANNOTATE_IGNORE_WRITES_BEGIN();\
+    }while(0)\
+
+  /* Stop ignoring all memory accesses. */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+    do {\
+      ANNOTATE_IGNORE_WRITES_END();\
+      ANNOTATE_IGNORE_READS_END();\
+    }while(0)\
+
+  /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events:
+     RWLOCK* and CONDVAR*. */
+  #define ANNOTATE_IGNORE_SYNC_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring sync events. */
+  #define ANNOTATE_IGNORE_SYNC_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(__FILE__, __LINE__)
+
+
+  /* Enable (enable!=0) or disable (enable==0) race detection for all threads.
+     This annotation could be useful if you want to skip expensive race analysis
+     during some period of program execution, e.g. during initialization. */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(__FILE__, __LINE__, \
+        enable)
+
+  /* -------------------------------------------------------------
+     Annotations useful for debugging. */
+
+  /* Request to trace every access to "address". */
+  #define ANNOTATE_TRACE_MEMORY(address) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(__FILE__, __LINE__, address)
+
+  /* Report the current thread name to a race detector. */
+  #define ANNOTATE_THREAD_NAME(name) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing locks.  They are not
+     normally needed by modules that merely use locks.
+     The "lock" argument is a pointer to the lock object. */
+
+  /* Report that a lock has been created at address "lock". */
+  #define ANNOTATE_RWLOCK_CREATE(lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+  /* Report that the lock at address "lock" is about to be destroyed. */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+  /* Report that the lock at address "lock" has been acquired.
+     is_w=1 for writer lock, is_w=0 for reader lock. */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(__FILE__, __LINE__, lock, \
+        is_w)
+
+  /* Report that the lock at address "lock" is about to be released. */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(__FILE__, __LINE__, lock, \
+        is_w)
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing barriers.  They are not
+     normally needed by modules that merely use barriers.
+     The "barrier" argument is a pointer to the barrier object. */
+
+  /* Report that the "barrier" has been initialized with initial "count".
+   If 'reinitialization_allowed' is true, initialization is allowed to happen
+   multiple times w/o calling barrier_destroy() */
+  #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(__FILE__, __LINE__, barrier, \
+        count, reinitialization_allowed)
+
+  /* Report that we are about to enter barrier_wait("barrier"). */
+  #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(__FILE__, __LINE__, \
+        barrier)
+
+  /* Report that we just exited barrier_wait("barrier"). */
+  #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(__FILE__, __LINE__, \
+        barrier)
+
+  /* Report that the "barrier" has been destroyed. */
+  #define ANNOTATE_BARRIER_DESTROY(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(__FILE__, __LINE__, \
+        barrier)
+
+  /* -------------------------------------------------------------
+     Annotations useful for testing race detectors. */
+
+  /* Report that we expect a race on the variable at "address".
+     Use only in unit tests for a race detector. */
+  #define ANNOTATE_EXPECT_RACE(address, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(__FILE__, __LINE__, address, \
+        description)
+
+  #define ANNOTATE_FLUSH_EXPECTED_RACES() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(__FILE__, __LINE__)
+
+  /* A no-op. Insert where you like to test the interceptors. */
+  #define ANNOTATE_NO_OP(arg) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(__FILE__, __LINE__, arg)
+
+  /* Force the race detector to flush its state. The actual effect depends on
+   * the implementation of the detector. */
+  #define ANNOTATE_FLUSH_STATE() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(__FILE__, __LINE__)
+
+
+#else  /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+  #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
+  #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
+  #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
+  #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
+  #define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
+  #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
+  #define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
+  #define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
+  #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
+  #define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
+  #define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
+  #define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
+  #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size)  /* empty */
+  #define ANNOTATE_SWAP_MEMORY_RANGE(address, size)  /* empty */
+  #define ANNOTATE_PCQ_CREATE(pcq) /* empty */
+  #define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
+  #define ANNOTATE_PCQ_PUT(pcq) /* empty */
+  #define ANNOTATE_PCQ_GET(pcq) /* empty */
+  #define ANNOTATE_NEW_MEMORY(address, size) /* empty */
+  #define ANNOTATE_EXPECT_RACE(address, description) /* empty */
+  #define ANNOTATE_FLUSH_EXPECTED_RACES() /* empty */
+  #define ANNOTATE_BENIGN_RACE(address, description) /* empty */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
+  #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
+  #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
+  #define ANNOTATE_TRACE_MEMORY(arg) /* empty */
+  #define ANNOTATE_THREAD_NAME(name) /* empty */
+  #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_READS_END() /* empty */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_WRITES_END() /* empty */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
+  #define ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_SYNC_END() /* empty */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
+  #define ANNOTATE_NO_OP(arg) /* empty */
+  #define ANNOTATE_FLUSH_STATE() /* empty */
+
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+/* Use the macros above rather than using these functions directly. */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
+    const char *file, int line,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
+    const char *file, int line,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
+    const char *file, int line,
+    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
+    const char *file, int line,
+    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
+    const char *file, int line, const volatile void *barrier, long count,
+    long reinitialization_allowed) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
+    const char *file, int line, const volatile void *cv,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
+    const char *file, int line,
+    const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
+    const char *file, int line,
+    const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(
+    const char *file, int line,
+    const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(
+    const char *file, int line,
+    const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
+    const char *file, int line,
+    const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
+    const char *file, int line,
+    const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
+    const char *file, int line,
+    const volatile void *mem, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
+    const char *file, int line, const volatile void *mem,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
+    const char *file, int line, const volatile void *mem,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
+    const char *file, int line, const volatile void *mem, long size,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
+    const char *file, int line,
+    const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
+    const char *file, int line,
+    const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
+    const char *file, int line,
+    const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
+    const char *file, int line,
+    const char *name) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
+    const char *file, int line, int enable) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
+    const char *file, int line,
+    const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+
+#if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1
+/* Return non-zero value if running under valgrind.
+
+  If "valgrind.h" is included into dynamic_annotations.c,
+  the regular valgrind mechanism will be used.
+  See http://valgrind.org/docs/manual/manual-core-adv.html about
+  RUNNING_ON_VALGRIND and other valgrind "client requests".
+  The file "valgrind.h" may be obtained by doing
+     svn co svn://svn.valgrind.org/valgrind/trunk/include
+
+  If for some reason you can't use "valgrind.h" or want to fake valgrind,
+  there are two ways to make this function return non-zero:
+    - Use environment variable: export RUNNING_ON_VALGRIND=1
+    - Make your tool intercept the function RunningOnValgrind() and
+      change its return value.
+ */
+int RunningOnValgrind(void) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+#endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
+
+  /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+
+     Instead of doing
+        ANNOTATE_IGNORE_READS_BEGIN();
+        ... = x;
+        ANNOTATE_IGNORE_READS_END();
+     one can use
+        ... = ANNOTATE_UNPROTECTED_READ(x); */
+  template <class T>
+  inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) {
+    ANNOTATE_IGNORE_READS_BEGIN();
+    T res = x;
+    ANNOTATE_IGNORE_READS_END();
+    return res;
+  }
+  /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)        \
+    namespace {                                                       \
+      class static_var ## _annotator {                                \
+       public:                                                        \
+        static_var ## _annotator() {                                  \
+          ANNOTATE_BENIGN_RACE_SIZED(&static_var,                     \
+                                      sizeof(static_var),             \
+            # static_var ": " description);                           \
+        }                                                             \
+      };                                                              \
+      static static_var ## _annotator the ## static_var ## _annotator;\
+    }
+#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+  #define ANNOTATE_UNPROTECTED_READ(x) (x)
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)  /* empty */
+
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+#endif  /* __DYNAMIC_ANNOTATIONS_H__ */
diff --git a/base/third_party/libevent/event.h b/base/third_party/libevent/event.h
new file mode 100644
index 0000000..d47d797
--- /dev/null
+++ b/base/third_party/libevent/event.h
@@ -0,0 +1,10 @@
+// The Chromium build contains its own checkout of libevent. This stub is used
+// when building the Chrome OS or Android libchrome package to instead use the
+// system headers.
+#if defined(__ANDROID__) || defined(__ANDROID_HOST__)
+#include <event2/event.h>
+#include <event2/event_compat.h>
+#include <event2/event_struct.h>
+#else
+#include <event.h>
+#endif
diff --git a/base/third_party/nspr/prtime.cc b/base/third_party/nspr/prtime.cc
index 88bd47b..97d2c27 100644
--- a/base/third_party/nspr/prtime.cc
+++ b/base/third_party/nspr/prtime.cc
@@ -53,8 +53,8 @@
  * PR_NormalizeTime
  * PR_GMTParameters
  * PR_ImplodeTime
- *   This was modified to use the Win32 SYSTEMTIME/FILETIME structures
- *   and the timezone offsets are applied to the FILETIME structure.
+ *   Upstream implementation from
+ *   http://lxr.mozilla.org/nspr/source/pr/src/misc/prtime.c#221
  * All types and macros are defined in the base/third_party/prtime.h file.
  * These have been copied from the following nspr files. We have only copied
  * over the types we need.
@@ -71,136 +71,10 @@
 #include "base/third_party/nspr/prtime.h"
 #include "build/build_config.h"
 
-#if defined(OS_WIN)
-#include <windows.h>
-#elif defined(OS_MACOSX)
-#include <CoreFoundation/CoreFoundation.h>
-#elif defined(OS_ANDROID)
-#include <ctype.h>
-#include "base/os_compat_android.h"  // For timegm()
-#elif defined(OS_NACL)
-#include "base/os_compat_nacl.h"  // For timegm()
-#endif
 #include <errno.h>  /* for EINVAL */
 #include <time.h>
 
-/* Implements the Unix localtime_r() function for windows */
-#if defined(OS_WIN)
-static void localtime_r(const time_t* secs, struct tm* time) {
-  (void) localtime_s(time, secs);
-}
-#endif
-
 /*
- *------------------------------------------------------------------------
- *
- * PR_ImplodeTime --
- *
- *     Cf. time_t mktime(struct tm *tp)
- *     Note that 1 year has < 2^25 seconds.  So an PRInt32 is large enough.
- *
- *------------------------------------------------------------------------
- */
-PRTime
-PR_ImplodeTime(const PRExplodedTime *exploded)
-{
-    // This is important, we want to make sure multiplications are
-    // done with the correct precision.
-    static const PRTime kSecondsToMicroseconds = static_cast<PRTime>(1000000);
-#if defined(OS_WIN)
-   // Create the system struct representing our exploded time.
-    SYSTEMTIME st = {};
-    FILETIME ft = {};
-    ULARGE_INTEGER uli = {};
-
-    st.wYear = exploded->tm_year;
-    st.wMonth = static_cast<WORD>(exploded->tm_month + 1);
-    st.wDayOfWeek = exploded->tm_wday;
-    st.wDay = static_cast<WORD>(exploded->tm_mday);
-    st.wHour = static_cast<WORD>(exploded->tm_hour);
-    st.wMinute = static_cast<WORD>(exploded->tm_min);
-    st.wSecond = static_cast<WORD>(exploded->tm_sec);
-    st.wMilliseconds = static_cast<WORD>(exploded->tm_usec/1000);
-     // Convert to FILETIME.
-    if (!SystemTimeToFileTime(&st, &ft)) {
-      NOTREACHED() << "Unable to convert time";
-      return 0;
-    }
-    // Apply offsets.
-    uli.LowPart = ft.dwLowDateTime;
-    uli.HighPart = ft.dwHighDateTime;
-    // Convert from Windows epoch to NSPR epoch, and 100-nanoseconds units
-    // to microsecond units.
-    PRTime result =
-        static_cast<PRTime>((uli.QuadPart / 10) - 11644473600000000i64);
-    // Adjust for time zone and dst.  Convert from seconds to microseconds.
-    result -= (exploded->tm_params.tp_gmt_offset +
-               exploded->tm_params.tp_dst_offset) * kSecondsToMicroseconds;
-    // Add microseconds that cannot be represented in |st|.
-    result += exploded->tm_usec % 1000;
-    return result;
-#elif defined(OS_MACOSX)
-    // Create the system struct representing our exploded time.
-    CFGregorianDate gregorian_date;
-    gregorian_date.year = exploded->tm_year;
-    gregorian_date.month = exploded->tm_month + 1;
-    gregorian_date.day = exploded->tm_mday;
-    gregorian_date.hour = exploded->tm_hour;
-    gregorian_date.minute = exploded->tm_min;
-    gregorian_date.second = exploded->tm_sec;
-
-    // Compute |absolute_time| in seconds, correct for gmt and dst
-    // (note the combined offset will be negative when we need to add it), then
-    // convert to microseconds which is what PRTime expects.
-    CFAbsoluteTime absolute_time =
-        CFGregorianDateGetAbsoluteTime(gregorian_date, NULL);
-    PRTime result = static_cast<PRTime>(absolute_time);
-    result -= exploded->tm_params.tp_gmt_offset +
-              exploded->tm_params.tp_dst_offset;
-    result += kCFAbsoluteTimeIntervalSince1970;  // PRTime epoch is 1970
-    result *= kSecondsToMicroseconds;
-    result += exploded->tm_usec;
-    return result;
-#elif defined(OS_POSIX)
-    struct tm exp_tm;
-    memset(&exp_tm, 0, sizeof(exp_tm));
-    exp_tm.tm_sec  = exploded->tm_sec;
-    exp_tm.tm_min  = exploded->tm_min;
-    exp_tm.tm_hour = exploded->tm_hour;
-    exp_tm.tm_mday = exploded->tm_mday;
-    exp_tm.tm_mon  = exploded->tm_month;
-    exp_tm.tm_year = exploded->tm_year - 1900;
-
-    time_t absolute_time = timegm(&exp_tm);
-
-    // If timegm returned -1.  Since we don't pass it a time zone, the only
-    // valid case of returning -1 is 1 second before Epoch (Dec 31, 1969).
-    if (absolute_time == -1 &&
-        !(exploded->tm_year == 1969 && exploded->tm_month == 11 &&
-        exploded->tm_mday == 31 && exploded->tm_hour == 23 &&
-        exploded->tm_min == 59 && exploded->tm_sec == 59)) {
-      // If we get here, time_t must be 32 bits.
-      // Date was possibly too far in the future and would overflow.  Return
-      // the most future date possible (year 2038).
-      if (exploded->tm_year >= 1970)
-        return INT_MAX * kSecondsToMicroseconds;
-      // Date was possibly too far in the past and would underflow.  Return
-      // the most past date possible (year 1901).
-      return INT_MIN * kSecondsToMicroseconds;
-    }
-
-    PRTime result = static_cast<PRTime>(absolute_time);
-    result -= exploded->tm_params.tp_gmt_offset +
-              exploded->tm_params.tp_dst_offset;
-    result *= kSecondsToMicroseconds;
-    result += exploded->tm_usec;
-    return result;
-#else
-#error No PR_ImplodeTime implemented on your platform.
-#endif
-}
-
-/* 
  * The COUNT_LEAPS macro counts the number of leap years passed by
  * till the start of the given year Y.  At the start of the year 4
  * A.D. the number of leap years passed by is 0, while at the start of
@@ -215,9 +89,16 @@
  * midnight 00:00:00.
  */
 
-#define COUNT_LEAPS(Y)   ( ((Y)-1)/4 - ((Y)-1)/100 + ((Y)-1)/400 )
-#define COUNT_DAYS(Y)  ( ((Y)-1)*365 + COUNT_LEAPS(Y) )
-#define DAYS_BETWEEN_YEARS(A, B)  (COUNT_DAYS(B) - COUNT_DAYS(A))
+#define COUNT_LEAPS(Y) (((Y)-1) / 4 - ((Y)-1) / 100 + ((Y)-1) / 400)
+#define COUNT_DAYS(Y) (((Y)-1) * 365 + COUNT_LEAPS(Y))
+#define DAYS_BETWEEN_YEARS(A, B) (COUNT_DAYS(B) - COUNT_DAYS(A))
+
+/* Implements the Unix localtime_r() function for windows */
+#if defined(OS_WIN)
+static void localtime_r(const time_t* secs, struct tm* time) {
+  (void) localtime_s(time, secs);
+}
+#endif
 
 /*
  * Static variables used by functions in this file
@@ -243,6 +124,56 @@
 };
 
 /*
+ *------------------------------------------------------------------------
+ *
+ * PR_ImplodeTime --
+ *
+ *     Cf. time_t mktime(struct tm *tp)
+ *     Note that 1 year has < 2^25 seconds.  So an PRInt32 is large enough.
+ *
+ *------------------------------------------------------------------------
+ */
+PRTime
+PR_ImplodeTime(const PRExplodedTime *exploded)
+{
+  PRExplodedTime copy;
+  PRTime retVal;
+  PRInt64 secPerDay, usecPerSec;
+  PRInt64 temp;
+  PRInt64 numSecs64;
+  PRInt32 numDays;
+  PRInt32 numSecs;
+
+  /* Normalize first.  Do this on our copy */
+  copy = *exploded;
+  PR_NormalizeTime(&copy, PR_GMTParameters);
+
+  numDays = DAYS_BETWEEN_YEARS(1970, copy.tm_year);
+
+  numSecs = copy.tm_yday * 86400 + copy.tm_hour * 3600 + copy.tm_min * 60 +
+            copy.tm_sec;
+
+  LL_I2L(temp, numDays);
+  LL_I2L(secPerDay, 86400);
+  LL_MUL(temp, temp, secPerDay);
+  LL_I2L(numSecs64, numSecs);
+  LL_ADD(numSecs64, numSecs64, temp);
+
+  /* apply the GMT and DST offsets */
+  LL_I2L(temp, copy.tm_params.tp_gmt_offset);
+  LL_SUB(numSecs64, numSecs64, temp);
+  LL_I2L(temp, copy.tm_params.tp_dst_offset);
+  LL_SUB(numSecs64, numSecs64, temp);
+
+  LL_I2L(usecPerSec, 1000000L);
+  LL_MUL(temp, numSecs64, usecPerSec);
+  LL_I2L(retVal, copy.tm_usec);
+  LL_ADD(retVal, retVal, temp);
+
+  return retVal;
+}
+
+/*
  *-------------------------------------------------------------------------
  *
  * IsLeapYear --
@@ -448,7 +379,7 @@
  */
 
 PRTimeParameters
-PR_GMTParameters(const PRExplodedTime* /* gmt */)
+PR_GMTParameters(const PRExplodedTime* /*gmt*/)
 {
     PRTimeParameters retVal = { 0, 0 };
     return retVal;
diff --git a/base/third_party/nspr/prtime.h b/base/third_party/nspr/prtime.h
index 01a4e54..20bae38 100644
--- a/base/third_party/nspr/prtime.h
+++ b/base/third_party/nspr/prtime.h
@@ -73,6 +73,17 @@
 #define PR_INT16_MAX 32767
 #define NSPR_API(__type) extern __type
 
+/*
+ * Long-long (64-bit signed integer type) support macros used by
+ * PR_ImplodeTime().
+ * See http://lxr.mozilla.org/nspr/source/pr/include/prlong.h
+ */
+
+#define LL_I2L(l, i) ((l) = (PRInt64)(i))
+#define LL_MUL(r, a, b) ((r) = (a) * (b))
+#define LL_ADD(r, a, b) ((r) = (a) + (b))
+#define LL_SUB(r, a, b) ((r) = (a) - (b))
+
 /**********************************************************************/
 /************************* TYPES AND CONSTANTS ************************/
 /**********************************************************************/
diff --git a/base/third_party/valgrind/memcheck.h b/base/third_party/valgrind/memcheck.h
new file mode 100644
index 0000000..aac34fc
--- /dev/null
+++ b/base/third_party/valgrind/memcheck.h
@@ -0,0 +1,282 @@
+#ifdef ANDROID
+  #include "memcheck/memcheck.h"
+#else
+/*
+   ----------------------------------------------------------------
+
+   Notice that the following BSD-style license applies to this one
+   file (memcheck.h) only.  The rest of Valgrind is licensed under the
+   terms of the GNU General Public License, version 2, unless
+   otherwise indicated.  See the COPYING file in the source
+   distribution for details.
+
+   ----------------------------------------------------------------
+
+   This file is part of MemCheck, a heavyweight Valgrind tool for
+   detecting memory errors.
+
+   Copyright (C) 2000-2010 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must 
+      not claim that you wrote the original software.  If you use this 
+      software in a product, an acknowledgment in the product 
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote 
+      products derived from this software without specific prior written 
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   ----------------------------------------------------------------
+
+   Notice that the above BSD-style license applies to this one file
+   (memcheck.h) only.  The entire rest of Valgrind is licensed under
+   the terms of the GNU General Public License, version 2.  See the
+   COPYING file in the source distribution for details.
+
+   ---------------------------------------------------------------- 
+*/
+
+
+#ifndef __MEMCHECK_H
+#define __MEMCHECK_H
+
+
+/* This file is for inclusion into client (your!) code.
+
+   You can use these macros to manipulate and query memory permissions
+   inside your own programs.
+
+   See comment near the top of valgrind.h on how to use them.
+*/
+
+#include "valgrind.h"
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+   enum { 
+      VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
+      VG_USERREQ__MAKE_MEM_UNDEFINED,
+      VG_USERREQ__MAKE_MEM_DEFINED,
+      VG_USERREQ__DISCARD,
+      VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
+      VG_USERREQ__CHECK_MEM_IS_DEFINED,
+      VG_USERREQ__DO_LEAK_CHECK,
+      VG_USERREQ__COUNT_LEAKS,
+
+      VG_USERREQ__GET_VBITS,
+      VG_USERREQ__SET_VBITS,
+
+      VG_USERREQ__CREATE_BLOCK,
+
+      VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
+
+      /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
+      VG_USERREQ__COUNT_LEAK_BLOCKS,
+
+      /* This is just for memcheck's internal use - don't use it */
+      _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR 
+         = VG_USERREQ_TOOL_BASE('M','C') + 256
+   } Vg_MemCheckClientRequest;
+
+
+
+/* Client-code macros to manipulate the state of memory. */
+
+/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len)           \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_NOACCESS,       \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+      
+/* Similarly, mark memory at _qzz_addr as addressable but undefined
+   for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len)          \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_UNDEFINED,      \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similarly, mark memory at _qzz_addr as addressable and defined
+   for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len)            \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_DEFINED,        \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
+   not altered: bytes which are addressable are marked as defined,
+   but those which are not addressable are left unchanged. */
+#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len)     \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,              \
+                            VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Create a block-description handle.  The description is an ascii
+   string which is included in any messages pertaining to addresses
+   within the specified memory range.  Has no other effect on the
+   properties of the memory range. */
+#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc)	   \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,        \
+                            VG_USERREQ__CREATE_BLOCK,              \
+                            (_qzz_addr), (_qzz_len), (_qzz_desc),  \
+                            0, 0)
+
+/* Discard a block-description-handle. Returns 1 for an
+   invalid handle, 0 for a valid handle. */
+#define VALGRIND_DISCARD(_qzz_blkindex)                          \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__DISCARD,                 \
+                            0, (_qzz_blkindex), 0, 0, 0)
+
+
+/* Client-code macros to check the state of memory. */
+
+/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
+   If suitable addressibility is not established, Valgrind prints an
+   error message and returns the address of the first offending byte.
+   Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len)      \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                             \
+                            VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,  \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Check that memory at _qzz_addr is addressable and defined for
+   _qzz_len bytes.  If suitable addressibility and definedness are not
+   established, Valgrind prints an error message and returns the
+   address of the first offending byte.  Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len)        \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                           \
+                            VG_USERREQ__CHECK_MEM_IS_DEFINED,    \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Use this macro to force the definedness and addressibility of an
+   lvalue to be checked.  If suitable addressibility and definedness
+   are not established, Valgrind prints an error message and returns
+   the address of the first offending byte.  Otherwise it returns
+   zero. */
+#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue)                \
+   VALGRIND_CHECK_MEM_IS_DEFINED(                                \
+      (volatile unsigned char *)&(__lvalue),                     \
+                      (unsigned long)(sizeof (__lvalue)))
+
+
+/* Do a full memory leak check (like --leak-check=full) mid-execution. */
+#define VALGRIND_DO_LEAK_CHECK                                   \
+   {unsigned long _qzz_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__DO_LEAK_CHECK,           \
+                            0, 0, 0, 0, 0);                      \
+   }
+
+/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
+#define VALGRIND_DO_QUICK_LEAK_CHECK				 \
+   {unsigned long _qzz_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__DO_LEAK_CHECK,           \
+                            1, 0, 0, 0, 0);                      \
+   }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+   all previous leak checks.  They must be lvalues.  */
+#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed)     \
+   /* For safety on 64-bit platforms we assign the results to private
+      unsigned long variables, then assign these to the lvalues the user
+      specified, which works no matter what type 'leaked', 'dubious', etc
+      are.  We also initialise '_qzz_leaked', etc because
+      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+      defined. */                                                        \
+   {unsigned long _qzz_res;                                              \
+    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
+    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                              \
+                               VG_USERREQ__COUNT_LEAKS,                  \
+                               &_qzz_leaked, &_qzz_dubious,              \
+                               &_qzz_reachable, &_qzz_suppressed, 0);    \
+    leaked     = _qzz_leaked;                                            \
+    dubious    = _qzz_dubious;                                           \
+    reachable  = _qzz_reachable;                                         \
+    suppressed = _qzz_suppressed;                                        \
+   }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+   all previous leak checks.  They must be lvalues.  */
+#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
+   /* For safety on 64-bit platforms we assign the results to private
+      unsigned long variables, then assign these to the lvalues the user
+      specified, which works no matter what type 'leaked', 'dubious', etc
+      are.  We also initialise '_qzz_leaked', etc because
+      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+      defined. */                                                        \
+   {unsigned long _qzz_res;                                              \
+    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
+    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                              \
+                               VG_USERREQ__COUNT_LEAK_BLOCKS,            \
+                               &_qzz_leaked, &_qzz_dubious,              \
+                               &_qzz_reachable, &_qzz_suppressed, 0);    \
+    leaked     = _qzz_leaked;                                            \
+    dubious    = _qzz_dubious;                                           \
+    reachable  = _qzz_reachable;                                         \
+    suppressed = _qzz_suppressed;                                        \
+   }
+
+
+/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
+   into the provided zzvbits array.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zzsrc/zzvbits are not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes)                \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                          \
+                                    VG_USERREQ__GET_VBITS,      \
+                                    (const char*)(zza),         \
+                                    (char*)(zzvbits),           \
+                                    (zznbytes), 0, 0)
+
+/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
+   from the provided zzvbits array.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zza/zzvbits are not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes)                \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                          \
+                                    VG_USERREQ__SET_VBITS,      \
+                                    (const char*)(zza),         \
+                                    (const char*)(zzvbits),     \
+                                    (zznbytes), 0, 0 )
+
+#endif
+
+#endif
diff --git a/base/third_party/valgrind/valgrind.h b/base/third_party/valgrind/valgrind.h
new file mode 100644
index 0000000..0668a71
--- /dev/null
+++ b/base/third_party/valgrind/valgrind.h
@@ -0,0 +1,4797 @@
+#ifdef ANDROID
+  #include "include/valgrind.h"
+#else
+/* -*- c -*-
+   ----------------------------------------------------------------
+
+   Notice that the following BSD-style license applies to this one
+   file (valgrind.h) only.  The rest of Valgrind is licensed under the
+   terms of the GNU General Public License, version 2, unless
+   otherwise indicated.  See the COPYING file in the source
+   distribution for details.
+
+   ----------------------------------------------------------------
+
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2000-2010 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must 
+      not claim that you wrote the original software.  If you use this 
+      software in a product, an acknowledgment in the product 
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote 
+      products derived from this software without specific prior written 
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   ----------------------------------------------------------------
+
+   Notice that the above BSD-style license applies to this one file
+   (valgrind.h) only.  The entire rest of Valgrind is licensed under
+   the terms of the GNU General Public License, version 2.  See the
+   COPYING file in the source distribution for details.
+
+   ---------------------------------------------------------------- 
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+   You can use these macros to manipulate and query Valgrind's 
+   execution inside your own programs.
+
+   The resulting executables will still run without Valgrind, just a
+   little bit more slowly than they otherwise would, but otherwise
+   unchanged.  When not running on valgrind, each client request
+   consumes very few (eg. 7) instructions, so the resulting performance
+   loss is negligible unless you plan to execute client requests
+   millions of times per second.  Nevertheless, if that is still a
+   problem, you can compile with the NVALGRIND symbol defined (gcc
+   -DNVALGRIND) so that client requests are not even compiled in.  */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND                                         */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+   conditionally compile based on our version number.  Note that these
+   were introduced at version 3.6 and so do not exist in version 3.5
+   or earlier.  The recommended way to use them to check for "version
+   X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__)   \
+    && (__VALGRIND_MAJOR__ > 3                                   \
+        || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__    3
+#define __VALGRIND_MINOR__    6
+
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi.  So
+   we can't use C++ style "//" comments nor the "asm" keyword (instead
+   use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is.  Note
+   that in this file we're using the compiler's CPP symbols for
+   identifying architectures, which are different to the ones we use
+   within the rest of Valgrind.  Note, __powerpc__ is active for both
+   32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+   latter (on Linux, that is).
+
+   Misc note: how to find out what's predefined in gcc by default:
+   gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_ppc64_aix5
+#undef PLAT_ppc32_aix5
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+
+#if defined(_AIX) && defined(__64BIT__)
+#  define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+#  define PLAT_ppc32_aix5 1
+#elif defined(__APPLE__) && defined(__i386__)
+#  define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+#  define PLAT_amd64_darwin 1
+#elif defined(__MINGW32__) || defined(__CYGWIN32__) || defined(_WIN32) && defined(_M_IX86)
+#  define PLAT_x86_win32 1
+#elif defined(__linux__) && defined(__i386__)
+#  define PLAT_x86_linux 1
+#elif defined(__linux__) && defined(__x86_64__)
+#  define PLAT_amd64_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
+#  define PLAT_ppc32_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
+#  define PLAT_ppc64_linux 1
+#elif defined(__linux__) && defined(__arm__)
+#  define PLAT_arm_linux 1
+#else
+/* If we're not compiling for our target platform, don't generate
+   any inline asms.  */
+#  if !defined(NVALGRIND)
+#    define NVALGRIND 1
+#  endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS.  There is nothing */
+/* in here of use to end-users -- skip to the next section.           */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+   from the compiled code (analogous to NDEBUG's effects on
+   assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+   {                                                              \
+      (_zzq_rlval) = (_zzq_default);                              \
+   }
+
+#else  /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+   spots and handles magically.  Don't look too closely at them as
+   they will rot your brain.
+
+   The assembly code sequences for all architectures is in this one
+   file.  This is because this file must be stand-alone, and we don't
+   want to have multiple files.
+
+   For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+   value gets put in the return slot, so that everything works when
+   this is executed not under Valgrind.  Args are passed in a memory
+   block, and so there's no intrinsic limit to the number that could
+   be passed, but it's currently five.
+   
+   The macro args are: 
+      _zzq_rlval    result lvalue
+      _zzq_default  default value (result returned when running on real CPU)
+      _zzq_request  request code
+      _zzq_arg1..5  request params
+
+   The other two macros are used to support function wrapping, and are
+   a lot simpler.  VALGRIND_GET_NR_CONTEXT returns the value of the
+   guest's NRADDR pseudo-register and whatever other information is
+   needed to safely run the call original from the wrapper: on
+   ppc64-linux, the R2 value at the divert point is also needed.  This
+   information is abstracted into a user-visible type, OrigFn.
+
+   VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+   guest, but guarantees that the branch instruction will not be
+   redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+   branch-and-link-to-r11.  VALGRIND_CALL_NOREDIR is just text, not a
+   complete inline asm, since it needs to be combined with more magic
+   inline asm stuff to be useful.
+*/
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux)  ||  defined(PLAT_x86_darwin)  \
+    ||  (defined(PLAT_x86_win32) && defined(__GNUC__))
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "roll $3,  %%edi ; roll $13, %%edi\n\t"      \
+                     "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile unsigned int _zzq_args[6];                           \
+    volatile unsigned int _zzq_result;                            \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EDX = client_request ( %EAX ) */         \
+                     "xchgl %%ebx,%%ebx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EAX = guest_NRADDR */                    \
+                     "xchgl %%ecx,%%ecx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%EAX */                     \
+                     "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
+
+/* ------------------------- x86-Win32 ------------------------- */
+
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     __asm rol edi, 3  __asm rol edi, 13          \
+                     __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile uintptr_t _zzq_args[6];                              \
+    volatile unsigned int _zzq_result;                            \
+    _zzq_args[0] = (uintptr_t)(_zzq_request);                     \
+    _zzq_args[1] = (uintptr_t)(_zzq_arg1);                        \
+    _zzq_args[2] = (uintptr_t)(_zzq_arg2);                        \
+    _zzq_args[3] = (uintptr_t)(_zzq_arg3);                        \
+    _zzq_args[4] = (uintptr_t)(_zzq_arg4);                        \
+    _zzq_args[5] = (uintptr_t)(_zzq_arg5);                        \
+    __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default  \
+            __SPECIAL_INSTRUCTION_PREAMBLE                        \
+            /* %EDX = client_request ( %EAX ) */                  \
+            __asm xchg ebx,ebx                                    \
+            __asm mov _zzq_result, edx                            \
+    }                                                             \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm { __SPECIAL_INSTRUCTION_PREAMBLE                        \
+            /* %EAX = guest_NRADDR */                             \
+            __asm xchg ecx,ecx                                    \
+            __asm mov __addr, eax                                 \
+    }                                                             \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux)  ||  defined(PLAT_amd64_darwin)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rolq $3,  %%rdi ; rolq $13, %%rdi\n\t"      \
+                     "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile unsigned long long int _zzq_args[6];                 \
+    volatile unsigned long long int _zzq_result;                  \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RDX = client_request ( %RAX ) */         \
+                     "xchgq %%rbx,%%rbx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RAX = guest_NRADDR */                    \
+                     "xchgq %%rcx,%%rcx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_RAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%RAX */                     \
+                     "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned int  _zzq_args[6];                          \
+             unsigned int  _zzq_result;                           \
+             unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 3,%1\n\t" /*default*/                    \
+                     "mr 4,%2\n\t" /*ptr*/                        \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"     /*result*/                     \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_default), "b" (_zzq_ptr)         \
+                     : "cc", "memory", "r3", "r4");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    unsigned int __addr;                                          \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory", "r3"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned long long int  _zzq_args[6];                \
+    register unsigned long long int  _zzq_result __asm__("r3");   \
+    register unsigned long long int* _zzq_ptr __asm__("r4");      \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1"                                   \
+                     : "=r" (_zzq_result)                         \
+                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
+                     : "cc", "memory");                           \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr __asm__("r3");         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+            "mov r12, r12, ror #3  ; mov r12, r12, ror #13 \n\t"  \
+            "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  { volatile unsigned int  _zzq_args[6];                          \
+    volatile unsigned int  _zzq_result;                           \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    __asm__ volatile("mov r3, %1\n\t" /*default*/                 \
+                     "mov r4, %2\n\t" /*ptr*/                     \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* R3 = client_request ( R4 ) */             \
+                     "orr r10, r10, r10\n\t"                      \
+                     "mov %0, r3"     /*result*/                  \
+                     : "=r" (_zzq_result)                         \
+                     : "r" (_zzq_default), "r" (&_zzq_args[0])    \
+                     : "cc","memory", "r3", "r4");                \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    unsigned int __addr;                                          \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* R3 = guest_NRADDR */                      \
+                     "orr r11, r11, r11\n\t"                      \
+                     "mov %0, r3"                                 \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory", "r3"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                    \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R4 */        \
+                     "orr r12, r12, r12\n\t"
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+      unsigned int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned int  _zzq_args[7];                          \
+    register unsigned int  _zzq_result;                           \
+    register unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_args[6] = (unsigned int)(_zzq_default);                  \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "lwz 3, 24(4)\n\t"                           \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned long long int  _zzq_args[7];                \
+    register unsigned long long int  _zzq_result;                 \
+    register unsigned long long int* _zzq_ptr;                    \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_args[6] = (unsigned long long int)(_zzq_default);        \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "ld 3, 48(4)\n\t"                            \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING.  This is all very        */
+/* ugly.  It's the least-worst tradeoff I can think of.               */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a. appalling-hack) macros for doing
+   guaranteed-no-redirection macros, so as to get from function
+   wrappers to the functions they are wrapping.  The whole point is to
+   construct standard call sequences, but to do the call itself with a
+   special no-redirect call pseudo-instruction that the JIT
+   understands and handles specially.  This section is long and
+   repetitious, and I can't see a way to make it shorter.
+
+   The naming scheme is as follows:
+
+      CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+   'W' stands for "word" and 'v' for "void".  Hence there are
+   different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+   and for each, the possibility of returning a word-typed result, or
+   no result.
+*/
+
+/* Use these to write the name of your wrapper.  NOTE: duplicates
+   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+   args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
+   VG_CONCAT4(_vgwZU_,soname,_,fnname)
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
+   VG_CONCAT4(_vgwZZ_,soname,_,fnname)
+
+/* Use this macro from within a wrapper function to collect the
+   context (address and possibly other info) of the original function.
+   Once you have that you can then use it in one of the CALL_FN_
+   macros.  The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NR_CONTEXT(_lval)
+
+/* Derivatives of the main macros below, for calling functions
+   returning void. */
+
+#define CALL_FN_v_v(fnptr)                                        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1)                                  \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3)                      \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
+
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4)                \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
+
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5)             \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6)        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7)   \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux)  ||  defined(PLAT_x86_darwin)
+
+/* These regs are trashed by the hidden call.  No need to mention eax
+   as gcc can already see that, plus causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux and x86-darwin,
+   sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11)                          \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 44(%%eax)\n\t"                                    \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11,arg12)                    \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         "pushl 48(%%eax)\n\t"                                    \
+         "pushl 44(%%eax)\n\t"                                    \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux)  ||  defined(PLAT_amd64_darwin)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi",       \
+                            "rdi", "r8", "r9", "r10", "r11"
+
+/* This is all pretty complex.  It's so as to make stack unwinding
+   work reliably.  See bug 243270.  The basic problem is the sub and
+   add of 128 of %rsp in all of the following macros.  If gcc believes
+   the CFA is in %rsp, then unwinding may fail, because what's at the
+   CFA is not what gcc "expected" when it constructs the CFIs for the
+   places where the macros are instantiated.
+
+   But we can't just add a CFI annotation to increase the CFA offset
+   by 128, to match the sub of 128 from %rsp, because we don't know
+   whether gcc has chosen %rsp as the CFA at that point, or whether it
+   has chosen some other register (eg, %rbp).  In the latter case,
+   adding a CFI annotation to change the CFA offset is simply wrong.
+
+   So the solution is to get hold of the CFA using
+   __builtin_dwarf_cfa(), put it in a known register, and add a
+   CFI annotation to say what the register is.  We choose %rbp for
+   this (perhaps perversely), because:
+
+   (1) %rbp is already subject to unwinding.  If a new register was
+       chosen then the unwinder would have to unwind it in all stack
+       traces, which is expensive, and
+
+   (2) %rbp is already subject to precise exception updates in the
+       JIT.  If a new register was chosen, we'd have to have precise
+       exceptions for it too, which reduces performance of the
+       generated code.
+
+   However .. one extra complication.  We can't just whack the result
+   of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+   list of trashed registers at the end of the inline assembly
+   fragments; gcc won't allow %rbp to appear in that list.  Hence
+   instead we need to stash %rbp in %r15 for the duration of the asm,
+   and say that %r15 is trashed instead.  gcc seems happy to go with
+   that.
+
+   Oh .. and this all needs to be conditionalised so that it is
+   unchanged from before this commit, when compiled with older gccs
+   that don't support __builtin_dwarf_cfa.  Furthermore, since
+   this header file is freestanding, it has to be independent of
+   config.h, and so the following conditionalisation cannot depend on
+   configure time checks.
+
+   Although it's not clear from
+   'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+   this expression excludes Darwin.
+   .cfi directives in Darwin assembly appear to be completely
+   different and I haven't investigated how they work.
+
+   For even more entertainment value, note we have to use the
+   completely undocumented __builtin_dwarf_cfa(), which appears to
+   really compute the CFA, whereas __builtin_frame_address(0) claims
+   to but actually doesn't.  See
+   https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+#  define __FRAME_POINTER                                         \
+      ,"r"(__builtin_dwarf_cfa())
+#  define VALGRIND_CFI_PROLOGUE                                   \
+      "movq %%rbp, %%r15\n\t"                                     \
+      "movq %2, %%rbp\n\t"                                        \
+      ".cfi_remember_state\n\t"                                   \
+      ".cfi_def_cfa rbp, 0\n\t"
+#  define VALGRIND_CFI_EPILOGUE                                   \
+      "movq %%r15, %%rbp\n\t"                                     \
+      ".cfi_restore_state\n\t"
+#else
+#  define __FRAME_POINTER
+#  define VALGRIND_CFI_PROLOGUE
+#  define VALGRIND_CFI_EPILOGUE
+#endif
+
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+   long) == 8. */
+
+/* NB 9 Sept 07.  There is a nasty kludge here in all these CALL_FN_
+   macros.  In order not to trash the stack redzone, we need to drop
+   %rsp by 128 before the hidden call, and restore afterwards.  The
+   nastyness is that it is only by luck that the stack still appears
+   to be unwindable during the hidden call - since then the behaviour
+   of any routine using this macro does not match what the CFI data
+   says.  Sigh.
+
+   Why is this important?  Imagine that a wrapper has a stack
+   allocated local, and passes to the hidden call, a pointer to it.
+   Because gcc does not know about the hidden call, it may allocate
+   that local in the redzone.  Unfortunately the hidden call may then
+   trash it before it comes to use it.  So we must step clear of the
+   redzone, for the duration of the hidden call, to make it safe.
+
+   Probably the same problem afflicts the other redzone-style ABIs too
+   (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+   self describing (none of this CFI nonsense) so at least messing
+   with the stack pointer doesn't give a danger of non-unwindable
+   stack. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $8, %%rsp\n"                                       \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $16, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $24, %%rsp\n"                                      \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $32, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)  /* call 11-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];  /* [0] = callee addr, [1..11] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"  /* 136 + 5 pushes (40) = 176, a multiple of 16, so %rsp stays 16-aligned; cf. the plain $128 used by the even-stack-arg variants */ \
+         "pushq 80(%%rax)\n\t"  /* stack args 11..7, pushed right-to-left */ \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"  /* register args 6..1 -> r9,r8,rcx,rdx,rsi,rdi */ \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $40, %%rsp\n"   /* pop the five pushed stack args */ \
+         "addq $136,%%rsp\n\t" /* undo the subq above */          \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)  /* result read from %rax */     \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12)  /* call 12-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];  /* [0] = callee addr, [1..12] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"  /* 128 + 6 pushes (48) = 176, a multiple of 16, so %rsp stays 16-aligned */ \
+         "pushq 96(%%rax)\n\t"  /* stack args 12..7, pushed right-to-left */ \
+         "pushq 88(%%rax)\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"  /* register args 6..1 -> r9,r8,rcx,rdx,rsi,rdi */ \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                               \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $48, %%rsp\n"   /* pop the six pushed stack args */ \
+         "addq $128,%%rsp\n\t" /* undo the subq above */          \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)  /* result read from %rax */     \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+   extern int f9  ( int,int,int,int,int,int,int,int,int );
+   extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+   extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+   extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+   int g9 ( void ) {
+      return f9(11,22,33,44,55,66,77,88,99);
+   }
+   int g10 ( void ) {
+      return f10(11,22,33,44,55,66,77,88,99,110);
+   }
+   int g11 ( void ) {
+      return f11(11,22,33,44,55,66,77,88,99,110,121);
+   }
+   int g12 ( void ) {
+      return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+   }
+*/
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call; every CALL_FN_* asm block below lists them as clobbers. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux, 
+   sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)  /* call 0-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];  /* [0] = callee addr */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)  /* call 1-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];  /* [0] = callee addr, [1] = arg */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)  /* call 2-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];  /* [0] = callee addr, [1..2] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"   /* arg2->r4 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)  /* call 3-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];  /* [0] = callee addr, [1..3] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"   /* arg2->r4 */                       \
+         "lwz 5,12(11)\n\t"  /* arg3->r5 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)  /* call 4-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];  /* [0] = callee addr, [1..4] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)  /* call 5-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];  /* [0] = callee addr, [1..5] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"  /* arg5->r7 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)  /* call 6-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];  /* [0] = callee addr, [1..6] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"  /* arg6->r8 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)  /* call 7-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];  /* [0] = callee addr, [1..7] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"  /* arg7->r9 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)  /* call 8-arg fn without Valgrind redirection; result -> lval */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];  /* [0] = callee addr, [1..8] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)  /* call 9-arg fn without Valgrind redirection; args 1-8 in r3..r10, arg9 on the stack */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];  /* [0] = callee addr, [1..9] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "addi 1,1,-16\n\t"  /* open a 16-byte stack frame (r1 is the sp) */ \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"    /* arg9 stored at 8(sp) */           \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,16\n\t"   /* pop the frame */                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)  /* call 10-arg fn without Valgrind redirection; args 1-8 in r3..r10, args 9-10 on the stack */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];  /* [0] = callee addr, [1..10] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "addi 1,1,-16\n\t"  /* open a 16-byte stack frame (r1 is the sp) */ \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"   /* arg10 stored at 12(sp) */         \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"    /* arg9 stored at 8(sp) */           \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,16\n\t"   /* pop the frame */                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)  /* call 11-arg fn without Valgrind redirection; args 1-8 in r3..r10, args 9-11 on the stack */ \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];  /* [0] = callee addr, [1..11] = args */ \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      _argvec[11] = (unsigned long)arg11;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"      /* r11 = &_argvec[0] */              \
+         "addi 1,1,-32\n\t"  /* open a 32-byte stack frame (r1 is the sp) */ \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,16(1)\n\t"   /* arg11 stored at 16(sp) */         \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"   /* arg10 stored at 12(sp) */         \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"    /* arg9 stored at 8(sp) */           \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,32\n\t"   /* pop the frame */                  \
+         "mr %0,3"           /* result comes back in r3 */        \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      _argvec[11] = (unsigned long)arg11;                         \
+      _argvec[12] = (unsigned long)arg12;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-32\n\t"                                       \
+         /* arg12 */                                              \
+         "lwz 3,48(11)\n\t"                                       \
+         "stw 3,20(1)\n\t"                                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,16(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,32\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+   long) == 8. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-128\n\t"  /* expand stack frame */            \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,128"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-128\n\t"  /* expand stack frame */            \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,128"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-144\n\t"  /* expand stack frame */            \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,144"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-144\n\t"  /* expand stack frame */            \
+         /* arg12 */                                              \
+         "ld  3,96(11)\n\t"                                       \
+         "std 3,136(1)\n\t"                                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,144"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
+
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
+   long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory",  __CALLER_SAVED_REGS         \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "push {r0} \n\t"                                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #4 \n\t"                                    \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "push {r0, r1} \n\t"                                     \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #8 \n\t"                                    \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "push {r0, r1, r2} \n\t"                                 \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #12 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "push {r0, r1, r2, r3} \n\t"                             \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #16 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #20 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "push {r0} \n\t"                                         \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #24 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11)                          \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "ldr r1, [%1, #44] \n\t"                                 \
+         "push {r0, r1} \n\t"                                     \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #28 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS           \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11,arg12)                    \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "ldr r1, [%1, #44] \n\t"                                 \
+         "ldr r2, [%1, #48] \n\t"                                 \
+         "push {r0, r1, r2} \n\t"                                 \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #32 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+   still works.  Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
+         "addi 1,1,-" #_n_fr "\n\t"  /* sp -= _n_fr */            \
+         "lwz  3," #_n_fr "(1)\n\t"  /* old back-chain word */    \
+         "stw  3,0(1)\n\t"           /* re-link at new frame base */
+
+#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
+         "addi 1,1," #_n_fr "\n\t"  /* sp += _n_fr: undo the matching expand */
+
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+   long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2] = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t" /* arg2->r4 */                       \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(64)  /* room for stack-passed args */ \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"  /* param save area slot 9 -- presumably 24-byte link area + 8 reg homes; confirm vs AIX32 ABI */ \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(64)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(64)  /* room for stack-passed args */ \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"  /* param save area slot 10 */         \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"  /* param save area slot 9 */          \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(64)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;   /* callee's own TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* fn entry point */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"  /* r11 = &_argvec[2] */                  \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(72)  /* 72 (not 64): one extra stack slot */ \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,64(1)\n\t"  /* param save area slot 11 */         \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"  /* param save area slot 10 */         \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"  /* param save area slot 9 */          \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"  /* reload &_argvec[2] */                 \
+         "mr %0,3\n\t"   /* result returned in r3 */              \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(72)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(72) /* args 9-12 go at 56..68(r1) */ \
+         /* arg12 */                                              \
+         "lwz 3,48(11)\n\t"                                       \
+         "stw 3,68(1)\n\t"                                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,64(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args 1-8 go in r3-r10 */                              \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(72)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* Regs trashed by the hidden call; r2 (TOC) is listed even though the CALL_FN_ asm saves/restores it around the call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+   still works.  Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
+         "addi 1,1,-" #_n_fr "\n\t" /* r1 -= _n_fr */             \
+         "ld   3," #_n_fr "(1)\n\t" /* r3 = back-chain word at old r1 */ \
+         "std  3,0(1)\n\t" /* re-plant back chain at new r1 */
+
+#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
+         "addi 1,1," #_n_fr "\n\t" /* r1 += _n_fr: undo VG_EXPAND_FRAME_BY_trashes_r3 */
+
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+   long) == 8. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1] = (unsigned long)_orig.r2; /* callee TOC ptr */  \
+      _argvec[2] = (unsigned long)_orig.nraddr; /* target addr */ \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(128) /* frame for stack-passed arg9 */ \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args 1-8 go in r3-r10 */                              \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(128)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0]: asm's save slot for caller r2; [1]=callee TOC */ \
+      _argvec[1]   = (unsigned long)_orig.r2; /* callee TOC ptr */ \
+      _argvec[2]   = (unsigned long)_orig.nraddr; /* target addr */ \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t" /* r11 = &_argvec[2] */                   \
+         VG_EXPAND_FRAME_BY_trashes_r3(512) /* open 512-byte gap */ \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(128) /* frame for stack-passed args 9-10 */ \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args 1-8 go in r3-r10 */                              \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t" /* reload r11; the call clobbered it */   \
+         "mr %0,3\n\t" /* result from r3 */                       \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(128)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(144)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
+         /* arg12 */                                              \
+         "ld  3,96(11)\n\t"                                       \
+         "std 3,136(1)\n\t"                                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(144)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS.               */
+/*                                                                    */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes.  There are many more of these, but most are not
+   exposed to end-user view.  These are the public ones, all of the
+   form 0x1000 + small_number.
+
+   Core ones are in the range 0x00000000--0x0000ffff.  The non-public
+   ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+   embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+   ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+   (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+   enum { VG_USERREQ__RUNNING_ON_VALGRIND  = 0x1001,
+          VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+          /* These allow any function to be called from the simulated
+             CPU but run on the real CPU.  Nb: the first arg passed to
+             the function is always the ThreadId of the running
+             thread!  So CLIENT_CALL0 actually requires a 1 arg
+             function, etc. */
+          VG_USERREQ__CLIENT_CALL0 = 0x1101,
+          VG_USERREQ__CLIENT_CALL1 = 0x1102,
+          VG_USERREQ__CLIENT_CALL2 = 0x1103,
+          VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+          /* Can be useful in regression testing suites -- eg. can
+             send Valgrind's output to /dev/null and still count
+             errors. */
+          VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+          /* These are useful and can be interpreted by any tool that
+             tracks malloc() et al, by using vg_replace_malloc.c. */
+          VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+          VG_USERREQ__FREELIKE_BLOCK   = 0x1302,
+          /* Memory pool support. */
+          VG_USERREQ__CREATE_MEMPOOL   = 0x1303,
+          VG_USERREQ__DESTROY_MEMPOOL  = 0x1304,
+          VG_USERREQ__MEMPOOL_ALLOC    = 0x1305,
+          VG_USERREQ__MEMPOOL_FREE     = 0x1306,
+          VG_USERREQ__MEMPOOL_TRIM     = 0x1307,
+          VG_USERREQ__MOVE_MEMPOOL     = 0x1308,
+          VG_USERREQ__MEMPOOL_CHANGE   = 0x1309,
+          VG_USERREQ__MEMPOOL_EXISTS   = 0x130a,
+
+          /* Allow printfs to valgrind log. */
+          /* The first two pass the va_list argument by value, which
+             assumes it is the same size as or smaller than a UWord,
+             which generally isn't the case.  Hence are deprecated.
+             The second two pass the vargs by reference and so are
+             immune to this problem. */
+          /* both :: char* fmt, va_list vargs (DEPRECATED) */
+          VG_USERREQ__PRINTF           = 0x1401,
+          VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+          /* both :: char* fmt, va_list* vargs */
+          VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+          VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
+
+          /* Stack support. */
+          VG_USERREQ__STACK_REGISTER   = 0x1501,
+          VG_USERREQ__STACK_DEREGISTER = 0x1502,
+          VG_USERREQ__STACK_CHANGE     = 0x1503,
+
+          /* Wine support */
+          VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+          /* Querying of debug info. */
+          VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
+   } Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+#  define __extension__ /* */
+#endif
+
+
+/*
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result.
+ */
+
+#if defined(NVALGRIND)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                               \
+        _zzq_default, _zzq_request,                                    \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)         \
+   (_zzq_default)
+
+#else /*defined(NVALGRIND)*/
+
+#if defined(_MSC_VER)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                                \
+        _zzq_default, _zzq_request,                                     \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)          \
+   (vg_VALGRIND_DO_CLIENT_REQUEST_EXPR((uintptr_t)(_zzq_default),       \
+        (_zzq_request), (uintptr_t)(_zzq_arg1), (uintptr_t)(_zzq_arg2), \
+        (uintptr_t)(_zzq_arg3), (uintptr_t)(_zzq_arg4),                 \
+        (uintptr_t)(_zzq_arg5)))
+
+static __inline unsigned
+vg_VALGRIND_DO_CLIENT_REQUEST_EXPR(uintptr_t _zzq_default,
+                                   unsigned _zzq_request, uintptr_t _zzq_arg1,
+                                   uintptr_t _zzq_arg2, uintptr_t _zzq_arg3,
+                                   uintptr_t _zzq_arg4, uintptr_t _zzq_arg5)
+{
+    unsigned _zzq_rlval;
+    VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request,
+                      _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5);
+    return _zzq_rlval;
+}
+
+#else /*defined(_MSC_VER)*/
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                               \
+        _zzq_default, _zzq_request,                                    \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)         \
+   (__extension__({unsigned int _zzq_rlval;                            \
+    VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request, \
+                _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+    _zzq_rlval;                                                        \
+   }))
+
+#endif /*defined(_MSC_VER)*/
+
+#endif /*defined(NVALGRIND)*/
+
+
+/* Returns the number of Valgrinds this code is running under.  That
+   is, 0 if running natively, 1 if running under Valgrind, 2 if
+   running under Valgrind which is running under another Valgrind,
+   etc. */
+#define RUNNING_ON_VALGRIND                                           \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */,                   \
+                                    VG_USERREQ__RUNNING_ON_VALGRIND,  \
+                                    0, 0, 0, 0, 0)                    \
+
+
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+   _qzz_len - 1].  Useful if you are debugging a JITter or some such,
+   since it provides a way to make sure valgrind will retranslate the
+   invalidated area.  Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len)         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__DISCARD_TRANSLATIONS,  \
+                               _qzz_addr, _qzz_len, 0, 0, 0);     \
+   }
+
+
+/* These requests are for getting Valgrind itself to print something.
+   Possibly with a backtrace.  This is a really ugly hack.  The return value
+   is the number of characters printed, excluding the "**<pid>** " part at the
+   start and the backtrace (if present). */
+
+#if defined(NVALGRIND)
+
+#  define VALGRIND_PRINTF(...)
+#  define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+#if !defined(_MSC_VER)
+/* Modern GCC will optimize the static routine out if unused,
+   and unused attribute will shut down warnings about it.  */
+static int VALGRIND_PRINTF(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+#if defined(_MSC_VER)
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_VALIST_BY_REF,
+                              (uintptr_t)format,
+                              (uintptr_t)&vargs,
+                              0, 0, 0);
+#else
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_VALIST_BY_REF,
+                              (unsigned long)format,
+                              (unsigned long)&vargs, 
+                              0, 0, 0);
+#endif
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+#if !defined(_MSC_VER)
+static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+#if defined(_MSC_VER)
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+                              (uintptr_t)format,
+                              (uintptr_t)&vargs,
+                              0, 0, 0);
+#else
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+                              (unsigned long)format,
+                              (unsigned long)&vargs, 
+                              0, 0, 0);
+#endif
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+#endif /* NVALGRIND */
+
+
+/* These requests allow control to move from the simulated CPU to the
+   real CPU, calling an arbitary function.
+   
+   Note that the current ThreadId is inserted as the first argument.
+   So this call:
+
+     VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+   requires f to have this signature:
+
+     Word f(Word tid, Word arg1, Word arg2)
+
+   where "Word" is a word-sized type.
+
+   Note that these client requests are not entirely reliable.  For example,
+   if you call a function with them that subsequently calls printf(),
+   there's a high chance Valgrind will crash.  Generally, your prospects of
+   these working are made higher if the called function does not refer to
+   any global variables, and does not refer to any libc or other functions
+   (printf et al).  Any kind of entanglement with libc or dynamic linking is
+   likely to have a bad outcome, for tricky reasons which we've grappled
+   with a lot in the past.
+*/
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL0,          \
+                               _qyy_fn,                           \
+                               0, 0, 0, 0);                       \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL1,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, 0, 0, 0);               \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL2,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2, 0, 0);       \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL3,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2,              \
+                               _qyy_arg3, 0);                     \
+    _qyy_res;                                                     \
+   })
+
+
+/* Counts the number of errors that have been recorded by a tool.  Nb:
+   the tool must record the errors with VG_(maybe_record_error)() or
+   VG_(unique_error)() for them to be counted. */
+#define VALGRIND_COUNT_ERRORS                                     \
+   __extension__                                                  \
+   ({unsigned int _qyy_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__COUNT_ERRORS,          \
+                               0, 0, 0, 0, 0);                    \
+    _qyy_res;                                                     \
+   })
+
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+   when heap blocks are allocated in order to give accurate results.  This
+   happens automatically for the standard allocator functions such as
+   malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+   delete[], etc.
+
+   But if your program uses a custom allocator, this doesn't automatically
+   happen, and Valgrind will not do as well.  For example, if you allocate
+   superblocks with mmap() and then allocates chunks of the superblocks, all
+   Valgrind's observations will be at the mmap() level and it won't know that
+   the chunks should be considered separate entities.  In Memcheck's case,
+   that means you probably won't get heap block overrun detection (because
+   there won't be redzones marked as unaddressable) and you definitely won't
+   get any leak detection.
+
+   The following client requests allow a custom allocator to be annotated so
+   that it can be handled accurately by Valgrind.
+
+   VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+   by a malloc()-like function.  For Memcheck (an illustrative case), this
+   does two things:
+
+   - It records that the block has been allocated.  This means any addresses
+     within the block mentioned in error messages will be
+     identified as belonging to the block.  It also means that if the block
+     isn't freed it will be detected by the leak checker.
+
+   - It marks the block as being addressable and undefined (if 'is_zeroed' is
+     not set), or addressable and defined (if 'is_zeroed' is set).  This
+     controls how accesses to the block by the program are handled.
+   
+   'addr' is the start of the usable block (ie. after any
+   redzone), 'sizeB' is its size.  'rzB' is the redzone size if the allocator
+   can apply redzones -- these are blocks of padding at the start and end of
+   each block.  Adding redzones is recommended as it makes it much more likely
+   Valgrind will spot block overruns.  `is_zeroed' indicates if the memory is
+   zeroed (or filled with another predictable value), as is the case for
+   calloc().
+   
+   VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+   heap block -- that will be used by the client program -- is allocated.
+   It's best to put it at the outermost level of the allocator if possible;
+   for example, if you have a function my_alloc() which calls
+   internal_alloc(), and the client request is put inside internal_alloc(),
+   stack traces relating to the heap block will contain entries for both
+   my_alloc() and internal_alloc(), which is probably not what you want.
+
+   For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+   custom blocks from within a heap block, B, that has been allocated with
+   malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+   -- the custom blocks will take precedence.
+
+   VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK.  For
+   Memcheck, it does two things:
+
+   - It records that the block has been deallocated.  This assumes that the
+     block was annotated as having been allocated via
+     VALGRIND_MALLOCLIKE_BLOCK.  Otherwise, an error will be issued.
+
+   - It marks the block as being unaddressable.
+
+   VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+   heap block is deallocated.
+
+   In many cases, these two client requests will not be enough to get your
+   allocator working well with Memcheck.  More specifically, if your allocator
+   writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+   will be necessary to mark the memory as addressable just before the zeroing
+   occurs, otherwise you'll get a lot of invalid write errors.  For example,
+   you'll need to do this if your allocator recycles freed blocks, but it
+   zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+   Alternatively, if your allocator reuses freed blocks for allocator-internal
+   data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+   Really, what's happening is a blurring of the lines between the client
+   program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+   memory should be considered unaddressable to the client program, but the
+   allocator knows more than the rest of the client program and so may be able
+   to safely access it.  Extra client requests are necessary for Valgrind to
+   understand the distinction between the allocator and the rest of the
+   program.
+
+   Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request;  it
+   has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
+   
+   Ignored if addr == 0.
+*/
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)    \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MALLOCLIKE_BLOCK,      \
+                               addr, sizeB, rzB, is_zeroed, 0);   \
+   }
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+   Ignored if addr == 0.
+*/
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB)                        \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__FREELIKE_BLOCK,        \
+                               addr, rzB, 0, 0, 0);               \
+   }
+
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)             \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__CREATE_MEMPOOL,        \
+                               pool, rzB, is_zeroed, 0, 0);       \
+   }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool)                            \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__DESTROY_MEMPOOL,       \
+                               pool, 0, 0, 0, 0);                 \
+   }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)                  \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_ALLOC,         \
+                               pool, addr, size, 0, 0);           \
+   }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr)                         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_FREE,          \
+                               pool, addr, 0, 0, 0);              \
+   }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size)                   \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_TRIM,          \
+                               pool, addr, size, 0, 0);           \
+   }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB)                       \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MOVE_MEMPOOL,          \
+                               poolA, poolB, 0, 0, 0);            \
+   }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size)         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_CHANGE,        \
+                               pool, addrA, addrB, size, 0);      \
+   }
+
+/* Return 1 if a mempool exists, else 0. */
+#define VALGRIND_MEMPOOL_EXISTS(pool)                             \
+   __extension__                                                  \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_EXISTS,        \
+                               pool, 0, 0, 0, 0);                 \
+    _qzz_res;                                                     \
+   })
+
+/* Mark a piece of memory as being a stack. Returns a stack id. */
+#define VALGRIND_STACK_REGISTER(start, end)                       \
+   __extension__                                                  \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_REGISTER,        \
+                               start, end, 0, 0, 0);              \
+    _qzz_res;                                                     \
+   })
+
+/* Unmark the piece of memory associated with a stack id as being a
+   stack. */
+#define VALGRIND_STACK_DEREGISTER(id)                             \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_DEREGISTER,      \
+                               id, 0, 0, 0, 0);                   \
+   }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end)                     \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_CHANGE,          \
+                               id, start, end, 0, 0);             \
+   }
+
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta)   \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__LOAD_PDB_DEBUGINFO,    \
+                               fd, ptr, total_size, delta, 0);    \
+   }
+
+/* Map a code address to a source file name and line number.  buf64
+   must point to a 64-byte buffer in the caller's address space.  The
+   result will be dumped in there and is guaranteed to be zero
+   terminated.  If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64)                    \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MAP_IP_TO_SRCLOC,      \
+                               addr, buf64, 0, 0, 0);             \
+   }
+
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif   /* __VALGRIND_H */
+
+#endif
diff --git a/base/threading/non_thread_safe_unittest.cc b/base/threading/non_thread_safe_unittest.cc
index 2a27c3f..d523fc5 100644
--- a/base/threading/non_thread_safe_unittest.cc
+++ b/base/threading/non_thread_safe_unittest.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/threading/non_thread_safe.h"
+
+#include <memory>
+
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
 #include "base/threading/simple_thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -72,7 +74,7 @@
   void Run() override { non_thread_safe_class_.reset(); }
 
  private:
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class_;
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class_;
 
   DISALLOW_COPY_AND_ASSIGN(DeleteNonThreadSafeClassOnThread);
 };
@@ -80,7 +82,7 @@
 }  // namespace
 
 TEST(NonThreadSafeTest, CallsAllowedOnSameThread) {
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class(
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
 
   // Verify that DoStuff doesn't assert.
@@ -91,7 +93,7 @@
 }
 
 TEST(NonThreadSafeTest, DetachThenDestructOnDifferentThread) {
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class(
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
 
   // Verify that the destructor doesn't assert when called on a different thread
@@ -107,7 +109,7 @@
 #if GTEST_HAS_DEATH_TEST || !ENABLE_NON_THREAD_SAFE
 
 void NonThreadSafeClass::MethodOnDifferentThreadImpl() {
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class(
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
 
   // Verify that DoStuff asserts in debug builds only when called
@@ -131,7 +133,7 @@
 #endif  // ENABLE_NON_THREAD_SAFE
 
 void NonThreadSafeClass::DestructorOnDifferentThreadImpl() {
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class(
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
 
   // Verify that the destructor asserts in debug builds only
diff --git a/base/threading/platform_thread.h b/base/threading/platform_thread.h
index e2b09bc..9b217a9 100644
--- a/base/threading/platform_thread.h
+++ b/base/threading/platform_thread.h
@@ -99,7 +99,7 @@
 
 // Valid values for priority of Thread::Options and SimpleThread::Options, and
 // SetCurrentThreadPriority(), listed in increasing order of importance.
-enum class ThreadPriority {
+enum class ThreadPriority : int {
   // Suitable for threads that shouldn't disrupt high priority work.
   BACKGROUND,
   // Default priority level.
@@ -142,8 +142,8 @@
   // Sleeps for the specified duration.
   static void Sleep(base::TimeDelta duration);
 
-  // Sets the thread name visible to debuggers/tools. This has no effect
-  // otherwise.
+  // Sets the thread name visible to debuggers/tools. This will try to
+  // initialize the context for current thread unless it's a WorkerThread.
   static void SetName(const std::string& name);
 
   // Gets the thread name, if previously set by SetName.
@@ -180,9 +180,14 @@
   // |thread_handle|.
   static void Join(PlatformThreadHandle thread_handle);
 
+  // Detaches and releases the thread handle. The thread is no longer joinable
+  // and |thread_handle| is invalidated after this call.
+  static void Detach(PlatformThreadHandle thread_handle);
+
   // Toggles the current thread's priority at runtime. A thread may not be able
   // to raise its priority back up after lowering it if the process does not
-  // have a proper permission, e.g. CAP_SYS_NICE on Linux.
+  // have a proper permission, e.g. CAP_SYS_NICE on Linux. A thread may not be
+  // able to lower its priority back down after raising it to REALTIME_AUDIO.
   // Since changing other threads' priority is not permitted in favor of
   // security, this interface is restricted to change only the current thread
   // priority (https://crbug.com/399473).
diff --git a/base/threading/platform_thread_internal_posix.cc b/base/threading/platform_thread_internal_posix.cc
index 9af0204..378a24d 100644
--- a/base/threading/platform_thread_internal_posix.cc
+++ b/base/threading/platform_thread_internal_posix.cc
@@ -4,6 +4,7 @@
 
 #include "base/threading/platform_thread_internal_posix.h"
 
+#include "base/containers/adapters.h"
 #include "base/logging.h"
 
 namespace base {
@@ -11,8 +12,7 @@
 namespace internal {
 
 int ThreadPriorityToNiceValue(ThreadPriority priority) {
-  for (const ThreadPriorityToNiceValuePair& pair :
-       kThreadPriorityToNiceValueMap) {
+  for (const auto& pair : kThreadPriorityToNiceValueMap) {
     if (pair.priority == priority)
       return pair.nice_value;
   }
@@ -21,13 +21,17 @@
 }
 
 ThreadPriority NiceValueToThreadPriority(int nice_value) {
-  for (const ThreadPriorityToNiceValuePair& pair :
-       kThreadPriorityToNiceValueMap) {
-    if (pair.nice_value == nice_value)
+  // Try to find a priority that best describes |nice_value|. If there isn't
+  // an exact match, this method returns the closest priority whose nice value
+  // is higher (lower priority) than |nice_value|.
+  for (const auto& pair : Reversed(kThreadPriorityToNiceValueMap)) {
+    if (pair.nice_value >= nice_value)
       return pair.priority;
   }
-  NOTREACHED() << "Unknown nice value";
-  return ThreadPriority::NORMAL;
+
+  // Reaching here means |nice_value| is more than any of the defined
+  // priorities. The lowest priority is suitable in this case.
+  return ThreadPriority::BACKGROUND;
 }
 
 }  // namespace internal
diff --git a/base/threading/platform_thread_internal_posix.h b/base/threading/platform_thread_internal_posix.h
index 05a8d1e..5f4a215 100644
--- a/base/threading/platform_thread_internal_posix.h
+++ b/base/threading/platform_thread_internal_posix.h
@@ -5,6 +5,7 @@
 #ifndef BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
 #define BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
 
+#include "base/base_export.h"
 #include "base/threading/platform_thread.h"
 
 namespace base {
@@ -15,7 +16,11 @@
   ThreadPriority priority;
   int nice_value;
 };
-extern const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4];
+// The elements must be listed in the order of increasing priority (lowest
+// priority first), that is, in the order of decreasing nice values (highest
+// nice value first).
+BASE_EXPORT extern
+const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4];
 
 // Returns the nice value matching |priority| based on the platform-specific
 // implementation of kThreadPriorityToNiceValueMap.
@@ -23,7 +28,7 @@
 
 // Returns the ThreadPrioirty matching |nice_value| based on the platform-
 // specific implementation of kThreadPriorityToNiceValueMap.
-ThreadPriority NiceValueToThreadPriority(int nice_value);
+BASE_EXPORT ThreadPriority NiceValueToThreadPriority(int nice_value);
 
 // Allows platform specific tweaks to the generic POSIX solution for
 // SetCurrentThreadPriority. Returns true if the platform-specific
diff --git a/base/threading/platform_thread_linux.cc b/base/threading/platform_thread_linux.cc
index 3e7ee68..ab7c97e 100644
--- a/base/threading/platform_thread_linux.cc
+++ b/base/threading/platform_thread_linux.cc
@@ -29,30 +29,19 @@
 namespace {
 #if !defined(OS_NACL)
 const struct sched_param kRealTimePrio = {8};
-const struct sched_param kResetPrio = {0};
 #endif
 }  // namespace
 
 const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
     {ThreadPriority::BACKGROUND, 10},
     {ThreadPriority::NORMAL, 0},
-    {ThreadPriority::DISPLAY, -6},
+    {ThreadPriority::DISPLAY, -8},
     {ThreadPriority::REALTIME_AUDIO, -10},
 };
 
 bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
 #if !defined(OS_NACL)
-  ThreadPriority current_priority;
-  if (priority != ThreadPriority::REALTIME_AUDIO &&
-      GetCurrentThreadPriorityForPlatform(&current_priority) &&
-      current_priority == ThreadPriority::REALTIME_AUDIO) {
-    // If the pthread's round-robin scheduler is already enabled, and the new
-    // priority will use setpriority() instead, the pthread scheduler should be
-    // reset to use SCHED_OTHER so that setpriority() just works.
-    pthread_setschedparam(pthread_self(), SCHED_OTHER, &kResetPrio);
-    return false;
-  }
-  return priority == ThreadPriority::REALTIME_AUDIO  &&
+  return priority == ThreadPriority::REALTIME_AUDIO &&
          pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
 #else
   return false;
@@ -103,11 +92,9 @@
 
 void InitThreading() {}
 
-void InitOnThread() {}
-
 void TerminateOnThread() {}
 
-size_t GetDefaultThreadStackSize(const pthread_attr_t& /* attributes */) {
+size_t GetDefaultThreadStackSize(const pthread_attr_t& /*attributes*/) {
 #if !defined(THREAD_SANITIZER)
   return 0;
 #else
diff --git a/base/threading/platform_thread_mac.mm b/base/threading/platform_thread_mac.mm
index df11f85..51f3621 100644
--- a/base/threading/platform_thread_mac.mm
+++ b/base/threading/platform_thread_mac.mm
@@ -15,6 +15,7 @@
 
 #include "base/lazy_instance.h"
 #include "base/logging.h"
+#include "base/mac/foundation_util.h"
 #include "base/mac/mach_logging.h"
 #include "base/threading/thread_id_name_manager.h"
 #include "base/tracked_objects.h"
@@ -22,6 +23,10 @@
 
 namespace base {
 
+namespace {
+NSString* const kThreadPriorityKey = @"CrThreadPriorityKey";
+}  // namespace
+
 // If Cocoa is to be used on more than one thread, it must know that the
 // application is multithreaded.  Since it's possible to enter Cocoa code
 // from threads created by pthread_thread_create, Cocoa won't necessarily
@@ -164,21 +169,41 @@
 
   switch (priority) {
     case ThreadPriority::NORMAL:
+    case ThreadPriority::BACKGROUND:
+    case ThreadPriority::DISPLAY:
+      // Add support for non-NORMAL thread priorities. https://crbug.com/554651
       SetPriorityNormal(mach_thread_id);
       break;
     case ThreadPriority::REALTIME_AUDIO:
       SetPriorityRealtimeAudio(mach_thread_id);
       break;
-    default:
-      NOTREACHED() << "Unknown priority.";
-      break;
   }
+
+  [[[NSThread currentThread] threadDictionary]
+      setObject:@(static_cast<int>(priority))
+         forKey:kThreadPriorityKey];
 }
 
 // static
 ThreadPriority PlatformThread::GetCurrentThreadPriority() {
-  NOTIMPLEMENTED();
-  return ThreadPriority::NORMAL;
+  NSNumber* priority = base::mac::ObjCCast<NSNumber>([[[NSThread currentThread]
+      threadDictionary] objectForKey:kThreadPriorityKey]);
+
+  if (!priority)
+    return ThreadPriority::NORMAL;
+
+  ThreadPriority thread_priority =
+      static_cast<ThreadPriority>(priority.intValue);
+  switch (thread_priority) {
+    case ThreadPriority::BACKGROUND:
+    case ThreadPriority::NORMAL:
+    case ThreadPriority::DISPLAY:
+    case ThreadPriority::REALTIME_AUDIO:
+      return thread_priority;
+    default:
+      NOTREACHED() << "Unknown priority.";
+      return ThreadPriority::NORMAL;
+  }
 }
 
 size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
@@ -216,9 +241,6 @@
 #endif
 }
 
-void InitOnThread() {
-}
-
 void TerminateOnThread() {
 }
 
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
index 39a0073..2321b3c 100644
--- a/base/threading/platform_thread_posix.cc
+++ b/base/threading/platform_thread_posix.cc
@@ -12,9 +12,10 @@
 #include <sys/resource.h>
 #include <sys/time.h>
 
+#include <memory>
+
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/threading/platform_thread_internal_posix.h"
 #include "base/threading/thread_id_name_manager.h"
 #include "base/threading/thread_restrictions.h"
@@ -29,7 +30,6 @@
 namespace base {
 
 void InitThreading();
-void InitOnThread();
 void TerminateOnThread();
 size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes);
 
@@ -45,19 +45,22 @@
 };
 
 void* ThreadFunc(void* params) {
-  base::InitOnThread();
-
   PlatformThread::Delegate* delegate = nullptr;
 
   {
-    scoped_ptr<ThreadParams> thread_params(static_cast<ThreadParams*>(params));
+    std::unique_ptr<ThreadParams> thread_params(
+        static_cast<ThreadParams*>(params));
 
     delegate = thread_params->delegate;
     if (!thread_params->joinable)
       base::ThreadRestrictions::SetSingletonAllowed(false);
 
-    if (thread_params->priority != ThreadPriority::NORMAL)
-      PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+#if !defined(OS_NACL)
+    // Threads on linux/android may inherit their priority from the thread
+    // where they were created. This explicitly sets the priority of all new
+    // threads.
+    PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+#endif
   }
 
   ThreadIdNameManager::GetInstance()->RegisterThread(
@@ -97,7 +100,7 @@
   if (stack_size > 0)
     pthread_attr_setstacksize(&attributes, stack_size);
 
-  scoped_ptr<ThreadParams> params(new ThreadParams);
+  std::unique_ptr<ThreadParams> params(new ThreadParams);
   params->delegate = delegate;
   params->joinable = joinable;
   params->priority = priority;
@@ -206,6 +209,11 @@
   CHECK_EQ(0, pthread_join(thread_handle.platform_handle(), NULL));
 }
 
+// static
+void PlatformThread::Detach(PlatformThreadHandle thread_handle) {
+  CHECK_EQ(0, pthread_detach(thread_handle.platform_handle()));
+}
+
 // Mac has its own Set/GetCurrentThreadPriority() implementations.
 #if !defined(OS_MACOSX)
 
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
index 52f8d1b..2d99ed8 100644
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -14,54 +14,83 @@
 #if defined(OS_POSIX)
 #include <sys/types.h>
 #include <unistd.h>
+#include "base/threading/platform_thread_internal_posix.h"
 #elif defined(OS_WIN)
 #include <windows.h>
 #endif
 
 namespace base {
 
-// Trivial tests that thread runs and doesn't crash on create and join ---------
+// Trivial tests that thread runs and doesn't crash on create, join, or detach -
 
 namespace {
 
 class TrivialThread : public PlatformThread::Delegate {
  public:
-  TrivialThread() : did_run_(false) {}
+  TrivialThread() : run_event_(WaitableEvent::ResetPolicy::MANUAL,
+                               WaitableEvent::InitialState::NOT_SIGNALED) {}
 
-  void ThreadMain() override { did_run_ = true; }
+  void ThreadMain() override { run_event_.Signal(); }
 
-  bool did_run() const { return did_run_; }
+  WaitableEvent& run_event() { return run_event_; }
 
  private:
-  bool did_run_;
+  WaitableEvent run_event_;
 
   DISALLOW_COPY_AND_ASSIGN(TrivialThread);
 };
 
 }  // namespace
 
-TEST(PlatformThreadTest, Trivial) {
+TEST(PlatformThreadTest, TrivialJoin) {
   TrivialThread thread;
   PlatformThreadHandle handle;
 
-  ASSERT_FALSE(thread.did_run());
+  ASSERT_FALSE(thread.run_event().IsSignaled());
   ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
   PlatformThread::Join(handle);
-  ASSERT_TRUE(thread.did_run());
+  ASSERT_TRUE(thread.run_event().IsSignaled());
 }
 
-TEST(PlatformThreadTest, TrivialTimesTen) {
+TEST(PlatformThreadTest, TrivialJoinTimesTen) {
   TrivialThread thread[10];
   PlatformThreadHandle handle[arraysize(thread)];
 
   for (size_t n = 0; n < arraysize(thread); n++)
-    ASSERT_FALSE(thread[n].did_run());
+    ASSERT_FALSE(thread[n].run_event().IsSignaled());
   for (size_t n = 0; n < arraysize(thread); n++)
     ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
   for (size_t n = 0; n < arraysize(thread); n++)
     PlatformThread::Join(handle[n]);
   for (size_t n = 0; n < arraysize(thread); n++)
-    ASSERT_TRUE(thread[n].did_run());
+    ASSERT_TRUE(thread[n].run_event().IsSignaled());
+}
+
+// The following detach tests are by nature racy. The run_event approximates the
+// end and termination of the thread, but threads could persist shortly after
+// the test completes.
+TEST(PlatformThreadTest, TrivialDetach) {
+  TrivialThread thread;
+  PlatformThreadHandle handle;
+
+  ASSERT_FALSE(thread.run_event().IsSignaled());
+  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+  PlatformThread::Detach(handle);
+  thread.run_event().Wait();
+}
+
+TEST(PlatformThreadTest, TrivialDetachTimesTen) {
+  TrivialThread thread[10];
+  PlatformThreadHandle handle[arraysize(thread)];
+
+  for (size_t n = 0; n < arraysize(thread); n++)
+    ASSERT_FALSE(thread[n].run_event().IsSignaled());
+  for (size_t n = 0; n < arraysize(thread); n++) {
+    ASSERT_TRUE(PlatformThread::Create(0, &thread[n], &handle[n]));
+    PlatformThread::Detach(handle[n]);
+  }
+  for (size_t n = 0; n < arraysize(thread); n++)
+    thread[n].run_event().Wait();
 }
 
 // Tests of basic thread functions ---------------------------------------------
@@ -72,8 +101,10 @@
  public:
   FunctionTestThread()
       : thread_id_(kInvalidThreadId),
-        termination_ready_(true, false),
-        terminate_thread_(true, false),
+        termination_ready_(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED),
+        terminate_thread_(WaitableEvent::ResetPolicy::MANUAL,
+                          WaitableEvent::InitialState::NOT_SIGNALED),
         done_(false) {}
   ~FunctionTestThread() override {
     EXPECT_TRUE(terminate_thread_.IsSignaled())
@@ -217,7 +248,8 @@
 
 class ThreadPriorityTestThread : public FunctionTestThread {
  public:
-  ThreadPriorityTestThread() = default;
+  explicit ThreadPriorityTestThread(ThreadPriority priority)
+      : priority_(priority) {}
   ~ThreadPriorityTestThread() override = default;
 
  private:
@@ -226,50 +258,108 @@
     EXPECT_EQ(ThreadPriority::NORMAL,
               PlatformThread::GetCurrentThreadPriority());
 
-    // Toggle each supported priority on the current thread and confirm it
-    // affects it.
-    const bool bumping_priority_allowed = IsBumpingPriorityAllowed();
-    for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
-      SCOPED_TRACE(i);
-      if (!bumping_priority_allowed &&
-          kThreadPriorityTestValues[i] >
-              PlatformThread::GetCurrentThreadPriority()) {
-        continue;
-      }
-
-      // Alter and verify the current thread's priority.
-      PlatformThread::SetCurrentThreadPriority(kThreadPriorityTestValues[i]);
-      EXPECT_EQ(kThreadPriorityTestValues[i],
-                PlatformThread::GetCurrentThreadPriority());
-    }
+    // Alter and verify the current thread's priority.
+    PlatformThread::SetCurrentThreadPriority(priority_);
+    EXPECT_EQ(priority_, PlatformThread::GetCurrentThreadPriority());
   }
 
+  const ThreadPriority priority_;
+
   DISALLOW_COPY_AND_ASSIGN(ThreadPriorityTestThread);
 };
 
 }  // namespace
 
-#if defined(OS_MACOSX)
-// PlatformThread::GetCurrentThreadPriority() is not implemented on OS X.
-#define MAYBE_ThreadPriorityCurrentThread DISABLED_ThreadPriorityCurrentThread
-#else
-#define MAYBE_ThreadPriorityCurrentThread ThreadPriorityCurrentThread
-#endif
-
 // Test changing a created thread's priority (which has different semantics on
 // some platforms).
-TEST(PlatformThreadTest, MAYBE_ThreadPriorityCurrentThread) {
-  ThreadPriorityTestThread thread;
-  PlatformThreadHandle handle;
+TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
+  const bool bumping_priority_allowed = IsBumpingPriorityAllowed();
+  if (bumping_priority_allowed) {
+    // Bump the priority in order to verify that new threads are started with
+    // normal priority.
+    PlatformThread::SetCurrentThreadPriority(ThreadPriority::DISPLAY);
+  }
 
-  ASSERT_FALSE(thread.IsRunning());
-  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
-  thread.WaitForTerminationReady();
-  ASSERT_TRUE(thread.IsRunning());
+  // Toggle each supported priority on the thread and confirm it affects it.
+  for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
+    if (!bumping_priority_allowed &&
+        kThreadPriorityTestValues[i] >
+            PlatformThread::GetCurrentThreadPriority()) {
+      continue;
+    }
 
-  thread.MarkForTermination();
-  PlatformThread::Join(handle);
-  ASSERT_FALSE(thread.IsRunning());
+    ThreadPriorityTestThread thread(kThreadPriorityTestValues[i]);
+    PlatformThreadHandle handle;
+
+    ASSERT_FALSE(thread.IsRunning());
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+    thread.WaitForTerminationReady();
+    ASSERT_TRUE(thread.IsRunning());
+
+    thread.MarkForTermination();
+    PlatformThread::Join(handle);
+    ASSERT_FALSE(thread.IsRunning());
+  }
 }
 
+// Test for a function defined in platform_thread_internal_posix.cc. On OSX and
+// iOS, platform_thread_internal_posix.cc is not compiled, so these platforms
+// are excluded here, too.
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_IOS)
+TEST(PlatformThreadTest, GetNiceValueToThreadPriority) {
+  using internal::NiceValueToThreadPriority;
+  using internal::kThreadPriorityToNiceValueMap;
+
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            kThreadPriorityToNiceValueMap[0].priority);
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            kThreadPriorityToNiceValueMap[1].priority);
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            kThreadPriorityToNiceValueMap[2].priority);
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            kThreadPriorityToNiceValueMap[3].priority);
+
+  static const int kBackgroundNiceValue =
+      kThreadPriorityToNiceValueMap[0].nice_value;
+  static const int kNormalNiceValue =
+      kThreadPriorityToNiceValueMap[1].nice_value;
+  static const int kDisplayNiceValue =
+      kThreadPriorityToNiceValueMap[2].nice_value;
+  static const int kRealtimeAudioNiceValue =
+      kThreadPriorityToNiceValueMap[3].nice_value;
+
+  // The tests below assume the nice values specified in the map are within
+  // the range below (both ends exclusive).
+  static const int kHighestNiceValue = 19;
+  static const int kLowestNiceValue = -20;
+
+  EXPECT_GT(kHighestNiceValue, kBackgroundNiceValue);
+  EXPECT_GT(kBackgroundNiceValue, kNormalNiceValue);
+  EXPECT_GT(kNormalNiceValue, kDisplayNiceValue);
+  EXPECT_GT(kDisplayNiceValue, kRealtimeAudioNiceValue);
+  EXPECT_GT(kRealtimeAudioNiceValue, kLowestNiceValue);
+
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kHighestNiceValue));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kBackgroundNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kBackgroundNiceValue));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kNormalNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            NiceValueToThreadPriority(kNormalNiceValue));
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            NiceValueToThreadPriority(kDisplayNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            NiceValueToThreadPriority(kDisplayNiceValue));
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            NiceValueToThreadPriority(kRealtimeAudioNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            NiceValueToThreadPriority(kRealtimeAudioNiceValue));
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            NiceValueToThreadPriority(kLowestNiceValue));
+}
+#endif
+
 }  // namespace base
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
index 80ca520..c906866 100644
--- a/base/threading/post_task_and_reply_impl.cc
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -7,7 +7,7 @@
 #include "base/bind.h"
 #include "base/location.h"
 #include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
diff --git a/base/threading/sequenced_task_runner_handle.cc b/base/threading/sequenced_task_runner_handle.cc
new file mode 100644
index 0000000..88b36a8
--- /dev/null
+++ b/base/threading/sequenced_task_runner_handle.cc
@@ -0,0 +1,68 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequenced_task_runner_handle.h"
+
+#include <utility>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_worker_pool.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+base::LazyInstance<base::ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
+    lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// static
+scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
+  // Return the registered SequencedTaskRunner, if any.
+  const SequencedTaskRunnerHandle* handle = lazy_tls_ptr.Pointer()->Get();
+  if (handle) {
+    // Various modes of setting SequencedTaskRunnerHandle don't combine.
+    DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
+    DCHECK(!SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread());
+    return handle->task_runner_;
+  }
+
+  // Return the SequencedTaskRunner obtained from SequencedWorkerPool, if any.
+  scoped_refptr<base::SequencedTaskRunner> task_runner =
+      SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread();
+  if (task_runner) {
+    DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
+    return task_runner;
+  }
+
+  // Return the SingleThreadTaskRunner for the current thread otherwise.
+  return base::ThreadTaskRunnerHandle::Get();
+}
+
+// static
+bool SequencedTaskRunnerHandle::IsSet() {
+  return lazy_tls_ptr.Pointer()->Get() ||
+         SequencedWorkerPool::GetWorkerPoolForCurrentThread() ||
+         base::ThreadTaskRunnerHandle::IsSet();
+}
+
+SequencedTaskRunnerHandle::SequencedTaskRunnerHandle(
+    scoped_refptr<SequencedTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
+  DCHECK(!SequencedTaskRunnerHandle::IsSet());
+  lazy_tls_ptr.Pointer()->Set(this);
+}
+
+SequencedTaskRunnerHandle::~SequencedTaskRunnerHandle() {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
+  DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
+  lazy_tls_ptr.Pointer()->Set(nullptr);
+}
+
+}  // namespace base
diff --git a/base/threading/sequenced_task_runner_handle.h b/base/threading/sequenced_task_runner_handle.h
new file mode 100644
index 0000000..e6dec1e
--- /dev/null
+++ b/base/threading/sequenced_task_runner_handle.h
@@ -0,0 +1,45 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
+#define BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+class BASE_EXPORT SequencedTaskRunnerHandle {
+ public:
+  // Returns a SequencedTaskRunner which guarantees that posted tasks will only
+  // run after the current task is finished and will satisfy a SequenceChecker.
+  // It should only be called if IsSet() returns true (see the comment there for
+  // the requirements).
+  static scoped_refptr<SequencedTaskRunner> Get();
+
+  // Returns true if one of the following conditions is fulfilled:
+  // a) A SequencedTaskRunner has been assigned to the current thread by
+  //    instantiating a SequencedTaskRunnerHandle.
+  // b) The current thread has a ThreadTaskRunnerHandle (which includes any
+  //    thread that has a MessageLoop associated with it), or
+  // c) The current thread is a worker thread belonging to a
+  //    SequencedWorkerPool.
+  static bool IsSet();
+
+  // Binds |task_runner| to the current thread.
+  explicit SequencedTaskRunnerHandle(
+      scoped_refptr<SequencedTaskRunner> task_runner);
+  ~SequencedTaskRunnerHandle();
+
+ private:
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedTaskRunnerHandle);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index 3cc50f4..57961b5 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -8,6 +8,7 @@
 
 #include <list>
 #include <map>
+#include <memory>
 #include <set>
 #include <utility>
 #include <vector>
@@ -19,17 +20,18 @@
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/linked_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/simple_thread.h"
 #include "base/threading/thread_local.h"
 #include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/trace_event.h"
 #include "base/tracked_objects.h"
 #include "build/build_config.h"
@@ -98,7 +100,7 @@
 class SequencedWorkerPoolTaskRunner : public TaskRunner {
  public:
   SequencedWorkerPoolTaskRunner(
-      const scoped_refptr<SequencedWorkerPool>& pool,
+      scoped_refptr<SequencedWorkerPool> pool,
       SequencedWorkerPool::WorkerShutdown shutdown_behavior);
 
   // TaskRunner implementation
@@ -118,11 +120,9 @@
 };
 
 SequencedWorkerPoolTaskRunner::SequencedWorkerPoolTaskRunner(
-    const scoped_refptr<SequencedWorkerPool>& pool,
+    scoped_refptr<SequencedWorkerPool> pool,
     SequencedWorkerPool::WorkerShutdown shutdown_behavior)
-    : pool_(pool),
-      shutdown_behavior_(shutdown_behavior) {
-}
+    : pool_(std::move(pool)), shutdown_behavior_(shutdown_behavior) {}
 
 SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() {
 }
@@ -131,7 +131,7 @@
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  if (delay == TimeDelta()) {
+  if (delay.is_zero()) {
     return pool_->PostWorkerTaskWithShutdownBehavior(
         from_here, task, shutdown_behavior_);
   }
@@ -150,7 +150,7 @@
 class SequencedWorkerPoolSequencedTaskRunner : public SequencedTaskRunner {
  public:
   SequencedWorkerPoolSequencedTaskRunner(
-      const scoped_refptr<SequencedWorkerPool>& pool,
+      scoped_refptr<SequencedWorkerPool> pool,
       SequencedWorkerPool::SequenceToken token,
       SequencedWorkerPool::WorkerShutdown shutdown_behavior);
 
@@ -178,13 +178,12 @@
 };
 
 SequencedWorkerPoolSequencedTaskRunner::SequencedWorkerPoolSequencedTaskRunner(
-    const scoped_refptr<SequencedWorkerPool>& pool,
+    scoped_refptr<SequencedWorkerPool> pool,
     SequencedWorkerPool::SequenceToken token,
     SequencedWorkerPool::WorkerShutdown shutdown_behavior)
-    : pool_(pool),
+    : pool_(std::move(pool)),
       token_(token),
-      shutdown_behavior_(shutdown_behavior) {
-}
+      shutdown_behavior_(shutdown_behavior) {}
 
 SequencedWorkerPoolSequencedTaskRunner::
 ~SequencedWorkerPoolSequencedTaskRunner() {
@@ -194,7 +193,7 @@
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  if (delay == TimeDelta()) {
+  if (delay.is_zero()) {
     return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
         token_, from_here, task, shutdown_behavior_);
   }
@@ -230,7 +229,7 @@
  public:
   // Hold a (cyclic) ref to |worker_pool|, since we want to keep it
   // around as long as we are running.
-  Worker(const scoped_refptr<SequencedWorkerPool>& worker_pool,
+  Worker(scoped_refptr<SequencedWorkerPool> worker_pool,
          int thread_number,
          const std::string& thread_name_prefix);
   ~Worker() override;
@@ -448,7 +447,7 @@
   // Owning pointers to all threads we've created so far, indexed by
   // ID. Since we lazily create threads, this may be less than
   // max_threads_ and will be initially empty.
-  typedef std::map<PlatformThreadId, linked_ptr<Worker> > ThreadMap;
+  using ThreadMap = std::map<PlatformThreadId, std::unique_ptr<Worker>>;
   ThreadMap threads_;
 
   // Set to true when we're in the process of creating another thread.
@@ -504,11 +503,11 @@
 // Worker definitions ---------------------------------------------------------
 
 SequencedWorkerPool::Worker::Worker(
-    const scoped_refptr<SequencedWorkerPool>& worker_pool,
+    scoped_refptr<SequencedWorkerPool> worker_pool,
     int thread_number,
     const std::string& prefix)
     : SimpleThread(prefix + StringPrintf("Worker%d", thread_number)),
-      worker_pool_(worker_pool),
+      worker_pool_(std::move(worker_pool)),
       task_shutdown_behavior_(BLOCK_SHUTDOWN),
       is_processing_task_(false) {
   Start();
@@ -612,7 +611,7 @@
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  DCHECK(delay == TimeDelta() || shutdown_behavior == SKIP_ON_SHUTDOWN);
+  DCHECK(delay.is_zero() || shutdown_behavior == SKIP_ON_SHUTDOWN);
   SequencedTask sequenced(from_here);
   sequenced.sequence_token_id = sequence_token.id_;
   sequenced.shutdown_behavior = shutdown_behavior;
@@ -788,9 +787,8 @@
     AutoLock lock(lock_);
     DCHECK(thread_being_created_);
     thread_being_created_ = false;
-    std::pair<ThreadMap::iterator, bool> result =
-        threads_.insert(
-            std::make_pair(this_worker->tid(), make_linked_ptr(this_worker)));
+    auto result = threads_.insert(
+        std::make_pair(this_worker->tid(), WrapUnique(this_worker)));
     DCHECK(result.second);
 
     while (true) {
@@ -813,6 +811,8 @@
             TRACE_EVENT_FLAG_FLOW_IN,
             "src_file", task.posted_from.file_name(),
             "src_func", task.posted_from.function_name());
+        TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION task_event(
+            task.posted_from.file_name());
         int new_thread_id = WillRunWorkerTask(task);
         {
           AutoUnlock unlock(lock_);
@@ -1317,7 +1317,7 @@
     const Closure& task,
     TimeDelta delay) {
   WorkerShutdown shutdown_behavior =
-      delay == TimeDelta() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
+      delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
   return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
                           from_here, task, delay);
 }
@@ -1344,7 +1344,7 @@
     const Closure& task,
     TimeDelta delay) {
   WorkerShutdown shutdown_behavior =
-      delay == TimeDelta() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
+      delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
   return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
                           from_here, task, delay);
 }
diff --git a/base/threading/sequenced_worker_pool.h b/base/threading/sequenced_worker_pool.h
index ba0e444..cbec395 100644
--- a/base/threading/sequenced_worker_pool.h
+++ b/base/threading/sequenced_worker_pool.h
@@ -8,13 +8,13 @@
 #include <stddef.h>
 
 #include <cstddef>
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/callback_forward.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/single_thread_task_runner.h"
 #include "base/task_runner.h"
 
@@ -374,7 +374,7 @@
 
   // Avoid pulling in too many headers by putting (almost) everything
   // into |inner_|.
-  const scoped_ptr<Inner> inner_;
+  const std::unique_ptr<Inner> inner_;
 
   DISALLOW_COPY_AND_ASSIGN(SequencedWorkerPool);
 };
diff --git a/base/threading/simple_thread.cc b/base/threading/simple_thread.cc
index 7059cea..6c64a17 100644
--- a/base/threading/simple_thread.cc
+++ b/base/threading/simple_thread.cc
@@ -12,15 +12,24 @@
 namespace base {
 
 SimpleThread::SimpleThread(const std::string& name_prefix)
-    : name_prefix_(name_prefix), name_(name_prefix),
-      thread_(), event_(true, false), tid_(0), joined_(false) {
-}
+    : name_prefix_(name_prefix),
+      name_(name_prefix),
+      thread_(),
+      event_(WaitableEvent::ResetPolicy::MANUAL,
+             WaitableEvent::InitialState::NOT_SIGNALED),
+      tid_(0),
+      joined_(false) {}
 
 SimpleThread::SimpleThread(const std::string& name_prefix,
                            const Options& options)
-    : name_prefix_(name_prefix), name_(name_prefix), options_(options),
-      thread_(), event_(true, false), tid_(0), joined_(false) {
-}
+    : name_prefix_(name_prefix),
+      name_(name_prefix),
+      options_(options),
+      thread_(),
+      event_(WaitableEvent::ResetPolicy::MANUAL,
+             WaitableEvent::InitialState::NOT_SIGNALED),
+      tid_(0),
+      joined_(false) {}
 
 SimpleThread::~SimpleThread() {
   DCHECK(HasBeenStarted()) << "SimpleThread was never started.";
@@ -93,8 +102,8 @@
     int num_threads)
     : name_prefix_(name_prefix),
       num_threads_(num_threads),
-      dry_(true, false) {
-}
+      dry_(WaitableEvent::ResetPolicy::MANUAL,
+           WaitableEvent::InitialState::NOT_SIGNALED) {}
 
 DelegateSimpleThreadPool::~DelegateSimpleThreadPool() {
   DCHECK(threads_.empty());
diff --git a/base/threading/simple_thread_unittest.cc b/base/threading/simple_thread_unittest.cc
index 7229d36..14dd459 100644
--- a/base/threading/simple_thread_unittest.cc
+++ b/base/threading/simple_thread_unittest.cc
@@ -95,7 +95,8 @@
 
 TEST(SimpleThreadTest, WaitForEvent) {
   // Create a thread, and wait for it to signal us.
-  WaitableEvent event(true, false);
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
 
   WaitEventRunner runner(&event);
   DelegateSimpleThread thread(&runner, "event_waiter");
@@ -108,7 +109,8 @@
 }
 
 TEST(SimpleThreadTest, NamedWithOptions) {
-  WaitableEvent event(true, false);
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
 
   WaitEventRunner runner(&event);
   SimpleThread::Options options;
@@ -152,7 +154,8 @@
   // We can reuse our pool.  Verify that all 10 threads can actually run in
   // parallel, so this test will only pass if there are actually 10 threads.
   AtomicSequenceNumber seq2;
-  WaitableEvent event(true, false);
+  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
+                      WaitableEvent::InitialState::NOT_SIGNALED);
   // Changing 9 to 10, for example, would cause us JoinAll() to never return.
   VerifyPoolRunner verifier(&seq2, 9, &event);
   pool.Start();
diff --git a/base/threading/thread.cc b/base/threading/thread.cc
index 783add8..9cdc691 100644
--- a/base/threading/thread.cc
+++ b/base/threading/thread.cc
@@ -7,6 +7,7 @@
 #include "base/bind.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
+#include "base/run_loop.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread_id_name_manager.h"
 #include "base/threading/thread_local.h"
@@ -51,6 +52,8 @@
       priority(ThreadPriority::NORMAL) {
 }
 
+Thread::Options::Options(const Options& other) = default;
+
 Thread::Options::~Options() {
 }
 
@@ -63,11 +66,13 @@
       running_(false),
       thread_(0),
       id_(kInvalidThreadId),
-      id_event_(true, false),
+      id_event_(WaitableEvent::ResetPolicy::MANUAL,
+                WaitableEvent::InitialState::NOT_SIGNALED),
       message_loop_(nullptr),
       message_loop_timer_slack_(TIMER_SLACK_NONE),
       name_(name),
-      start_event_(false, false) {
+      start_event_(WaitableEvent::ResetPolicy::MANUAL,
+                   WaitableEvent::InitialState::NOT_SIGNALED) {
 }
 
 Thread::~Thread() {
@@ -101,8 +106,8 @@
     type = MessageLoop::TYPE_CUSTOM;
 
   message_loop_timer_slack_ = options.timer_slack;
-  scoped_ptr<MessageLoop> message_loop = MessageLoop::CreateUnbound(
-      type, options.message_pump_factory);
+  std::unique_ptr<MessageLoop> message_loop =
+      MessageLoop::CreateUnbound(type, options.message_pump_factory);
   message_loop_ = message_loop.get();
   start_event_.Reset();
 
@@ -195,8 +200,8 @@
   return running_;
 }
 
-void Thread::Run(MessageLoop* message_loop) {
-  message_loop->Run();
+void Thread::Run(MessageLoop*) {
+  RunLoop().Run();
 }
 
 void Thread::SetThreadWasQuitProperly(bool flag) {
@@ -223,13 +228,12 @@
 
   // Lazily initialize the message_loop so that it can run on this thread.
   DCHECK(message_loop_);
-  scoped_ptr<MessageLoop> message_loop(message_loop_);
+  std::unique_ptr<MessageLoop> message_loop(message_loop_);
   message_loop_->BindToCurrentThread();
-  message_loop_->set_thread_name(name_);
   message_loop_->SetTimerSlack(message_loop_timer_slack_);
 
 #if defined(OS_WIN)
-  scoped_ptr<win::ScopedCOMInitializer> com_initializer;
+  std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
   if (com_status_ != NONE) {
     com_initializer.reset((com_status_ == STA) ?
         new win::ScopedCOMInitializer() :
diff --git a/base/threading/thread.h b/base/threading/thread.h
index da985da..c9a77d7 100644
--- a/base/threading/thread.h
+++ b/base/threading/thread.h
@@ -7,12 +7,12 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/callback.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/message_loop/timer_slack.h"
 #include "base/single_thread_task_runner.h"
@@ -41,10 +41,11 @@
 class BASE_EXPORT Thread : PlatformThread::Delegate {
  public:
   struct BASE_EXPORT Options {
-    typedef Callback<scoped_ptr<MessagePump>()> MessagePumpFactory;
+    typedef Callback<std::unique_ptr<MessagePump>()> MessagePumpFactory;
 
     Options();
     Options(MessageLoop::Type type, size_t size);
+    Options(const Options& other);
     ~Options();
 
     // Specifies the type of message loop that will be allocated on the thread.
diff --git a/base/threading/thread_checker_unittest.cc b/base/threading/thread_checker_unittest.cc
index fd98f76..bc5b1e4 100644
--- a/base/threading/thread_checker_unittest.cc
+++ b/base/threading/thread_checker_unittest.cc
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/threading/thread_checker.h"
+
+#include <memory>
+
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/threading/simple_thread.h"
-#include "base/threading/thread_checker.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 // Duplicated from base/threading/thread_checker.h so that we can be
@@ -72,7 +74,7 @@
   void Run() override { thread_checker_class_.reset(); }
 
  private:
-  scoped_ptr<ThreadCheckerClass> thread_checker_class_;
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class_;
 
   DISALLOW_COPY_AND_ASSIGN(DeleteThreadCheckerClassOnThread);
 };
@@ -80,7 +82,7 @@
 }  // namespace
 
 TEST(ThreadCheckerTest, CallsAllowedOnSameThread) {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // Verify that DoStuff doesn't assert.
@@ -91,7 +93,7 @@
 }
 
 TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // Verify that the destructor doesn't assert
@@ -104,7 +106,7 @@
 }
 
 TEST(ThreadCheckerTest, DetachFromThread) {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // Verify that DoStuff doesn't assert when called on a different thread after
@@ -119,7 +121,7 @@
 #if GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
 
 void ThreadCheckerClass::MethodOnDifferentThreadImpl() {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // DoStuff should assert in debug builds only when called on a
@@ -143,7 +145,7 @@
 #endif  // ENABLE_THREAD_CHECKER
 
 void ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl() {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // DoStuff doesn't assert when called on a different thread
diff --git a/base/threading/thread_collision_warner_unittest.cc b/base/threading/thread_collision_warner_unittest.cc
index 79ca7e2..71447ef 100644
--- a/base/threading/thread_collision_warner_unittest.cc
+++ b/base/threading/thread_collision_warner_unittest.cc
@@ -2,13 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/threading/thread_collision_warner.h"
+
+#include <memory>
+
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/simple_thread.h"
-#include "base/threading/thread_collision_warner.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 // '' : local class member function does not have a body
@@ -19,7 +21,7 @@
 
 // Would cause a memory leak otherwise.
 #undef DFAKE_MUTEX
-#define DFAKE_MUTEX(obj) scoped_ptr<base::AsserterBase> obj
+#define DFAKE_MUTEX(obj) std::unique_ptr<base::AsserterBase> obj
 
 // In Release, we expect the AsserterBase::warn() to not happen.
 #define EXPECT_NDEBUG_FALSE_DEBUG_TRUE EXPECT_FALSE
diff --git a/base/threading/thread_id_name_manager.cc b/base/threading/thread_id_name_manager.cc
index 56cfa27..107e0dc 100644
--- a/base/threading/thread_id_name_manager.cc
+++ b/base/threading/thread_id_name_manager.cc
@@ -10,6 +10,7 @@
 #include "base/logging.h"
 #include "base/memory/singleton.h"
 #include "base/strings/string_util.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 
 namespace base {
 namespace {
@@ -50,27 +51,37 @@
 
 void ThreadIdNameManager::SetName(PlatformThreadId id,
                                   const std::string& name) {
-  AutoLock locked(lock_);
-  NameToInternedNameMap::iterator iter = name_to_interned_name_.find(name);
   std::string* leaked_str = NULL;
-  if (iter != name_to_interned_name_.end()) {
-    leaked_str = iter->second;
-  } else {
-    leaked_str = new std::string(name);
-    name_to_interned_name_[name] = leaked_str;
+  {
+    AutoLock locked(lock_);
+    NameToInternedNameMap::iterator iter = name_to_interned_name_.find(name);
+    if (iter != name_to_interned_name_.end()) {
+      leaked_str = iter->second;
+    } else {
+      leaked_str = new std::string(name);
+      name_to_interned_name_[name] = leaked_str;
+    }
+
+    ThreadIdToHandleMap::iterator id_to_handle_iter =
+        thread_id_to_handle_.find(id);
+
+    // The main thread of a process will not be created as a Thread object which
+    // means there is no PlatformThreadHandler registered.
+    if (id_to_handle_iter == thread_id_to_handle_.end()) {
+      main_process_name_ = leaked_str;
+      main_process_id_ = id;
+      return;
+    }
+    thread_handle_to_interned_name_[id_to_handle_iter->second] = leaked_str;
   }
 
-  ThreadIdToHandleMap::iterator id_to_handle_iter =
-      thread_id_to_handle_.find(id);
-
-  // The main thread of a process will not be created as a Thread object which
-  // means there is no PlatformThreadHandler registered.
-  if (id_to_handle_iter == thread_id_to_handle_.end()) {
-    main_process_name_ = leaked_str;
-    main_process_id_ = id;
-    return;
-  }
-  thread_handle_to_interned_name_[id_to_handle_iter->second] = leaked_str;
+  // Add the leaked thread name to heap profiler context tracker. The name added
+  // is valid for the lifetime of the process. AllocationContextTracker cannot
+  // call GetName(which holds a lock) during the first allocation because it can
+  // cause a deadlock when the first allocation happens in the
+  // ThreadIdNameManager itself when holding the lock.
+  trace_event::AllocationContextTracker::SetCurrentThreadName(
+      leaked_str->c_str());
 }
 
 const char* ThreadIdNameManager::GetName(PlatformThreadId id) {
diff --git a/base/threading/thread_local_unittest.cc b/base/threading/thread_local_unittest.cc
index e94c1db..cdc1ca6 100644
--- a/base/threading/thread_local_unittest.cc
+++ b/base/threading/thread_local_unittest.cc
@@ -82,7 +82,8 @@
   static char* const kBogusPointer = reinterpret_cast<char*>(0x1234);
 
   char* tls_val;
-  base::WaitableEvent done(true, false);
+  base::WaitableEvent done(WaitableEvent::ResetPolicy::MANUAL,
+                           WaitableEvent::InitialState::NOT_SIGNALED);
 
   GetThreadLocal getter(&tlp, &done);
   getter.set_ptr(&tls_val);
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
index fb536ec..4212a4b 100644
--- a/base/threading/thread_restrictions.h
+++ b/base/threading/thread_restrictions.h
@@ -37,14 +37,14 @@
 class BrowserGpuChannelHostFactory;
 class BrowserGpuMemoryBufferManager;
 class BrowserShutdownProfileDumper;
+class BrowserSurfaceViewManager;
 class BrowserTestBase;
-class GpuChannelHost;
 class NestedMessagePumpAndroid;
 class ScopedAllowWaitForAndroidLayoutTests;
 class ScopedAllowWaitForDebugURL;
 class SoftwareOutputDeviceMus;
 class TextInputClientMac;
-class RasterWorkerPool;
+class CategorizedWorkerPool;
 }  // namespace content
 namespace dbus {
 class Bus;
@@ -53,15 +53,17 @@
 class BackendImpl;
 class InFlightIO;
 }
-namespace gles2 {
-class CommandBufferClientImpl;
+namespace gpu {
+class GpuChannelHost;
 }
 namespace mojo {
 namespace common {
 class MessagePumpMojo;
 }
+class SyncCallRestrictions;
 }
-namespace mus {
+namespace ui {
+class CommandBufferClientImpl;
 class CommandBufferLocal;
 class GpuState;
 }
@@ -81,7 +83,7 @@
 }
 
 namespace views {
-class WindowManagerConnection;
+class ScreenMus;
 }
 
 namespace base {
@@ -177,9 +179,9 @@
 #else
   // Inline the empty definitions of these functions so that they can be
   // compiled out.
-  static bool SetIOAllowed(bool /* allowed */) { return true; }
+  static bool SetIOAllowed(bool) { return true; }
   static void AssertIOAllowed() {}
-  static bool SetSingletonAllowed(bool /* allowed */) { return true; }
+  static bool SetSingletonAllowed(bool) { return true; }
   static void AssertSingletonAllowed() {}
   static void DisallowWaiting() {}
   static void AssertWaitAllowed() {}
@@ -189,6 +191,7 @@
   // DO NOT ADD ANY OTHER FRIEND STATEMENTS, talk to jam or brettw first.
   // BEGIN ALLOWED USAGE.
   friend class content::BrowserShutdownProfileDumper;
+  friend class content::BrowserSurfaceViewManager;
   friend class content::BrowserTestBase;
   friend class content::NestedMessagePumpAndroid;
   friend class content::ScopedAllowWaitForAndroidLayoutTests;
@@ -197,7 +200,7 @@
   friend class ::ScopedAllowWaitForLegacyWebViewApi;
   friend class cc::CompletionEvent;
   friend class cc::SingleThreadTaskGraphRunner;
-  friend class content::RasterWorkerPool;
+  friend class content::CategorizedWorkerPool;
   friend class remoting::AutoThread;
   friend class ui::WindowResizeHelperMac;
   friend class MessagePumpDefault;
@@ -207,10 +210,11 @@
   friend class ThreadTestHelper;
   friend class PlatformThread;
   friend class android::JavaHandlerThread;
-  friend class gles2::CommandBufferClientImpl;
   friend class mojo::common::MessagePumpMojo;
-  friend class mus::CommandBufferLocal;
-  friend class mus::GpuState;
+  friend class mojo::SyncCallRestrictions;
+  friend class ui::CommandBufferClientImpl;
+  friend class ui::CommandBufferLocal;
+  friend class ui::GpuState;
 
   // END ALLOWED USAGE.
   // BEGIN USAGE THAT NEEDS TO BE FIXED.
@@ -221,11 +225,11 @@
       content::BrowserGpuChannelHostFactory;      // http://crbug.com/125248
   friend class
       content::BrowserGpuMemoryBufferManager;     // http://crbug.com/420368
-  friend class content::GpuChannelHost;           // http://crbug.com/125264
   friend class content::TextInputClientMac;       // http://crbug.com/121917
   friend class dbus::Bus;                         // http://crbug.com/125222
   friend class disk_cache::BackendImpl;           // http://crbug.com/74623
   friend class disk_cache::InFlightIO;            // http://crbug.com/74623
+  friend class gpu::GpuChannelHost;               // http://crbug.com/125264
   friend class net::internal::AddressTrackerLinux;  // http://crbug.com/125097
   friend class net::NetworkChangeNotifierMac;     // http://crbug.com/125097
   friend class ::BrowserProcessImpl;              // http://crbug.com/125207
@@ -233,13 +237,13 @@
 #if !defined(OFFICIAL_BUILD)
   friend class content::SoftwareOutputDeviceMus;  // Interim non-production code
 #endif
-  friend class views::WindowManagerConnection;
+  friend class views::ScreenMus;
 // END USAGE THAT NEEDS TO BE FIXED.
 
 #if ENABLE_THREAD_RESTRICTIONS
   static bool SetWaitAllowed(bool allowed);
 #else
-  static bool SetWaitAllowed(bool /* allowed */) { return true; }
+  static bool SetWaitAllowed(bool) { return true; }
 #endif
 
   // Constructing a ScopedAllowWait temporarily allows waiting on the current
diff --git a/base/thread_task_runner_handle.cc b/base/threading/thread_task_runner_handle.cc
similarity index 61%
rename from base/thread_task_runner_handle.cc
rename to base/threading/thread_task_runner_handle.cc
index ee337b3..190e18f 100644
--- a/base/thread_task_runner_handle.cc
+++ b/base/threading/thread_task_runner_handle.cc
@@ -1,18 +1,21 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+#include <utility>
 
 #include "base/lazy_instance.h"
-#include "base/single_thread_task_runner.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_task_runner_handle.h"
 #include "base/threading/thread_local.h"
 
 namespace base {
 
 namespace {
 
-base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle> >::Leaky
+base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle>>::Leaky
     lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
 
 }  // namespace
@@ -26,21 +29,23 @@
 
 // static
 bool ThreadTaskRunnerHandle::IsSet() {
-  return lazy_tls_ptr.Pointer()->Get() != NULL;
+  return !!lazy_tls_ptr.Pointer()->Get();
 }
 
 ThreadTaskRunnerHandle::ThreadTaskRunnerHandle(
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner)
-    : task_runner_(task_runner) {
+    scoped_refptr<SingleThreadTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
   DCHECK(task_runner_->BelongsToCurrentThread());
-  DCHECK(!lazy_tls_ptr.Pointer()->Get());
+  // No SequencedTaskRunnerHandle (which includes ThreadTaskRunnerHandles)
+  // should already be set for this thread.
+  DCHECK(!SequencedTaskRunnerHandle::IsSet());
   lazy_tls_ptr.Pointer()->Set(this);
 }
 
 ThreadTaskRunnerHandle::~ThreadTaskRunnerHandle() {
   DCHECK(task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
-  lazy_tls_ptr.Pointer()->Set(NULL);
+  lazy_tls_ptr.Pointer()->Set(nullptr);
 }
 
 }  // namespace base
diff --git a/base/thread_task_runner_handle.h b/base/threading/thread_task_runner_handle.h
similarity index 73%
rename from base/thread_task_runner_handle.h
rename to base/threading/thread_task_runner_handle.h
index 197669e..c8e5893 100644
--- a/base/thread_task_runner_handle.h
+++ b/base/threading/thread_task_runner_handle.h
@@ -1,17 +1,17 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef BASE_THREAD_TASK_RUNNER_HANDLE_H_
-#define BASE_THREAD_TASK_RUNNER_HANDLE_H_
+#ifndef BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
+#define BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
 
 #include "base/base_export.h"
+#include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "base/single_thread_task_runner.h"
 
 namespace base {
 
-class SingleThreadTaskRunner;
-
 // ThreadTaskRunnerHandle stores a reference to a thread's TaskRunner
 // in thread-local storage.  Callers can then retrieve the TaskRunner
 // for the current thread by calling ThreadTaskRunnerHandle::Get().
@@ -29,13 +29,15 @@
   // Binds |task_runner| to the current thread. |task_runner| must belong
   // to the current thread for this to succeed.
   explicit ThreadTaskRunnerHandle(
-      const scoped_refptr<SingleThreadTaskRunner>& task_runner);
+      scoped_refptr<SingleThreadTaskRunner> task_runner);
   ~ThreadTaskRunnerHandle();
 
  private:
   scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadTaskRunnerHandle);
 };
 
 }  // namespace base
 
-#endif  // BASE_THREAD_TASK_RUNNER_HANDLE_H_
+#endif  // BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
index f6ecbe6..b0fd265 100644
--- a/base/threading/thread_unittest.cc
+++ b/base/threading/thread_unittest.cc
@@ -142,8 +142,8 @@
   // Ensure that the thread can work with only 12 kb and still process a
   // message.
   Thread::Options options;
-#if defined(ADDRESS_SANITIZER) && defined(OS_MACOSX)
-  // ASan bloats the stack variables and overflows the 12 kb stack on OSX.
+#if defined(ADDRESS_SANITIZER)
+  // ASan bloats the stack variables and overflows the 12 kb stack.
   options.stack_size = 24*1024;
 #else
   options.stack_size = 12*1024;
@@ -209,7 +209,8 @@
   b.Start();
 
   // Post a task that calls GetThreadId() on the created thread.
-  base::WaitableEvent event(false, false);
+  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                            base::WaitableEvent::InitialState::NOT_SIGNALED);
   base::PlatformThreadId id_from_new_thread;
   a.task_runner()->PostTask(
       FROM_HERE, base::Bind(ReturnThreadId, &a, &id_from_new_thread, &event));
@@ -286,5 +287,13 @@
 
 TEST_F(ThreadTest, ThreadNotStarted) {
   Thread a("Inert");
-  EXPECT_EQ(nullptr, a.task_runner());
+  EXPECT_FALSE(a.task_runner());
+}
+
+TEST_F(ThreadTest, MultipleWaitUntilThreadStarted) {
+  Thread a("MultipleWaitUntilThreadStarted");
+  EXPECT_TRUE(a.Start());
+  // It's OK to call WaitUntilThreadStarted() multiple times.
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
 }
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index e6b1d64..6b4c42f 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -53,7 +53,7 @@
 
 void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
                               const base::Closure& task,
-                              bool /* task_is_slow */) {
+                              bool /*task_is_slow*/) {
   pool_->PostTask(from_here, task);
 }
 
@@ -86,9 +86,7 @@
     PendingTask pending_task = pool_->WaitForTask();
     if (pending_task.task.is_null())
       break;
-    TRACE_EVENT2("toplevel", "WorkerThread::ThreadMain::Run",
-        "src_file", pending_task.posted_from.file_name(),
-        "src_func", pending_task.posted_from.function_name());
+    TRACE_TASK_EXECUTION("WorkerThread::ThreadMain::Run", pending_task);
 
     tracked_objects::TaskStopwatch stopwatch;
     stopwatch.Start();
@@ -152,8 +150,7 @@
   DCHECK(!terminated_)
       << "This thread pool is already terminated.  Do not post new tasks.";
 
-  pending_tasks_.push(*pending_task);
-  pending_task->task.Reset();
+  pending_tasks_.push(std::move(*pending_task));
 
   // We have enough worker threads.
   if (static_cast<size_t>(num_idle_threads_) >= pending_tasks_.size()) {
@@ -188,7 +185,7 @@
     }
   }
 
-  PendingTask pending_task = pending_tasks_.front();
+  PendingTask pending_task = std::move(pending_tasks_.front());
   pending_tasks_.pop();
   return pending_task;
 }
diff --git a/base/threading/worker_pool_posix.h b/base/threading/worker_pool_posix.h
index f8971ac..628e2b6 100644
--- a/base/threading/worker_pool_posix.h
+++ b/base/threading/worker_pool_posix.h
@@ -24,6 +24,7 @@
 #ifndef BASE_THREADING_WORKER_POOL_POSIX_H_
 #define BASE_THREADING_WORKER_POOL_POSIX_H_
 
+#include <memory>
 #include <queue>
 #include <string>
 
@@ -31,7 +32,6 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/pending_task.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
@@ -88,7 +88,7 @@
   bool terminated_;
   // Only used for tests to ensure correct thread ordering.  It will always be
   // NULL in non-test code.
-  scoped_ptr<ConditionVariable> num_idle_threads_cv_;
+  std::unique_ptr<ConditionVariable> num_idle_threads_cv_;
 
   DISALLOW_COPY_AND_ASSIGN(PosixDynamicThreadPool);
 };
diff --git a/base/threading/worker_pool_posix_unittest.cc b/base/threading/worker_pool_posix_unittest.cc
index 99a9369..6cefeed 100644
--- a/base/threading/worker_pool_posix_unittest.cc
+++ b/base/threading/worker_pool_posix_unittest.cc
@@ -96,7 +96,8 @@
         counter_(0),
         num_waiting_to_start_(0),
         num_waiting_to_start_cv_(&num_waiting_to_start_lock_),
-        start_(true, false) {}
+        start_(WaitableEvent::ResetPolicy::MANUAL,
+               WaitableEvent::InitialState::NOT_SIGNALED) {}
 
   void SetUp() override {
     peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
diff --git a/base/threading/worker_pool_unittest.cc b/base/threading/worker_pool_unittest.cc
index 27af50b..ef4bed1 100644
--- a/base/threading/worker_pool_unittest.cc
+++ b/base/threading/worker_pool_unittest.cc
@@ -26,7 +26,10 @@
 class PostTaskAndReplyTester
     : public base::RefCountedThreadSafe<PostTaskAndReplyTester> {
  public:
-  PostTaskAndReplyTester() : finished_(false), test_event_(false, false) {}
+  PostTaskAndReplyTester()
+      : finished_(false),
+        test_event_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                    WaitableEvent::InitialState::NOT_SIGNALED) {}
 
   void RunTest() {
     ASSERT_TRUE(thread_checker_.CalledOnValidThread());
@@ -69,8 +72,10 @@
 }  // namespace
 
 TEST_F(WorkerPoolTest, PostTask) {
-  WaitableEvent test_event(false, false);
-  WaitableEvent long_test_event(false, false);
+  WaitableEvent test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                           WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent long_test_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
 
   WorkerPool::PostTask(FROM_HERE,
                        base::Bind(&WaitableEvent::Signal,
diff --git a/base/time/time.cc b/base/time/time.cc
index 76ffeb7..3670f55 100644
--- a/base/time/time.cc
+++ b/base/time/time.cc
@@ -136,11 +136,6 @@
 // Time -----------------------------------------------------------------------
 
 // static
-Time Time::Max() {
-  return Time(std::numeric_limits<int64_t>::max());
-}
-
-// static
 Time Time::FromTimeT(time_t tt) {
   if (tt == 0)
     return Time();  // Preserve 0 so we can tell it doesn't exist.
@@ -263,6 +258,14 @@
   return true;
 }
 
+// static
+bool Time::ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs) {
+  return lhs.year == rhs.year && lhs.month == rhs.month &&
+         lhs.day_of_month == rhs.day_of_month && lhs.hour == rhs.hour &&
+         lhs.minute == rhs.minute && lhs.second == rhs.second &&
+         lhs.millisecond == rhs.millisecond;
+}
+
 std::ostream& operator<<(std::ostream& os, Time time) {
   Time::Exploded exploded;
   time.UTCExplode(&exploded);
diff --git a/base/time/time.h b/base/time/time.h
index 066d910..efece96 100644
--- a/base/time/time.h
+++ b/base/time/time.h
@@ -56,6 +56,7 @@
 #include <limits>
 
 #include "base/base_export.h"
+#include "base/compiler_specific.h"
 #include "base/numerics/safe_math.h"
 #include "build/build_config.h"
 
@@ -74,12 +75,12 @@
 // For FILETIME in FromFileTime, until it moves to a new converter class.
 // See TODO(iyengar) below.
 #include <windows.h>
-
 #include "base/gtest_prod_util.h"
 #endif
 
 namespace base {
 
+class PlatformThreadHandle;
 class TimeDelta;
 
 // The functions in the time_internal namespace are meant to be used only by the
@@ -106,14 +107,14 @@
   }
 
   // Converts units of time to TimeDeltas.
-  static TimeDelta FromDays(int days);
-  static TimeDelta FromHours(int hours);
-  static TimeDelta FromMinutes(int minutes);
-  static TimeDelta FromSeconds(int64_t secs);
-  static TimeDelta FromMilliseconds(int64_t ms);
-  static TimeDelta FromSecondsD(double secs);
-  static TimeDelta FromMillisecondsD(double ms);
-  static TimeDelta FromMicroseconds(int64_t us);
+  static constexpr TimeDelta FromDays(int days);
+  static constexpr TimeDelta FromHours(int hours);
+  static constexpr TimeDelta FromMinutes(int minutes);
+  static constexpr TimeDelta FromSeconds(int64_t secs);
+  static constexpr TimeDelta FromMilliseconds(int64_t ms);
+  static constexpr TimeDelta FromSecondsD(double secs);
+  static constexpr TimeDelta FromMillisecondsD(double ms);
+  static constexpr TimeDelta FromMicroseconds(int64_t us);
 #if defined(OS_WIN)
   static TimeDelta FromQPCValue(LONGLONG qpc_value);
 #endif
@@ -222,22 +223,22 @@
   }
 
   // Comparison operators.
-  bool operator==(TimeDelta other) const {
+  constexpr bool operator==(TimeDelta other) const {
     return delta_ == other.delta_;
   }
-  bool operator!=(TimeDelta other) const {
+  constexpr bool operator!=(TimeDelta other) const {
     return delta_ != other.delta_;
   }
-  bool operator<(TimeDelta other) const {
+  constexpr bool operator<(TimeDelta other) const {
     return delta_ < other.delta_;
   }
-  bool operator<=(TimeDelta other) const {
+  constexpr bool operator<=(TimeDelta other) const {
     return delta_ <= other.delta_;
   }
-  bool operator>(TimeDelta other) const {
+  constexpr bool operator>(TimeDelta other) const {
     return delta_ > other.delta_;
   }
-  bool operator>=(TimeDelta other) const {
+  constexpr bool operator>=(TimeDelta other) const {
     return delta_ >= other.delta_;
   }
 
@@ -248,10 +249,14 @@
   // Constructs a delta given the duration in microseconds. This is private
   // to avoid confusion by callers with an integer constructor. Use
   // FromSeconds, FromMilliseconds, etc. instead.
-  explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
+  constexpr explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
 
   // Private method to build a delta from a double.
-  static TimeDelta FromDouble(double value);
+  static constexpr TimeDelta FromDouble(double value);
+
+  // Private method to build a delta from the product of a user-provided value
+  // and a known-positive value.
+  static constexpr TimeDelta FromProduct(int64_t value, int64_t positive_value);
 
   // Delta in microseconds.
   int64_t delta_;
@@ -307,6 +312,12 @@
   // Returns true if this object represents the maximum time.
   bool is_max() const { return us_ == std::numeric_limits<int64_t>::max(); }
 
+  // Returns the maximum time, which should be greater than any reasonable time
+  // with which we might compare it.
+  static TimeClass Max() {
+    return TimeClass(std::numeric_limits<int64_t>::max());
+  }
+
   // For serializing only. Use FromInternalValue() to reconstitute. Please don't
   // use this and do arithmetic on it, as it is more error prone than using the
   // provided operators.
@@ -434,10 +445,6 @@
   // times are increasing, or that two calls to Now() won't be the same.
   static Time Now();
 
-  // Returns the maximum time, which should be greater than any reasonable time
-  // with which we might compare it.
-  static Time Max();
-
   // Returns the current time. Same as Now() except that this function always
   // uses system time so that there are no discrepancies between the returned
   // time and system time even on virtual environments including our test bot.
@@ -515,11 +522,29 @@
 
   // Converts an exploded structure representing either the local time or UTC
   // into a Time class.
+  // TODO(maksims): Get rid of these in favor of the methods below when
+  // all the callers stop using these ones.
   static Time FromUTCExploded(const Exploded& exploded) {
-    return FromExploded(false, exploded);
+    base::Time time;
+    ignore_result(FromUTCExploded(exploded, &time));
+    return time;
   }
   static Time FromLocalExploded(const Exploded& exploded) {
-    return FromExploded(true, exploded);
+    base::Time time;
+    ignore_result(FromLocalExploded(exploded, &time));
+    return time;
+  }
+
+  // Converts an exploded structure representing either the local time or UTC
+  // into a Time class. Returns false on a failure when, for example, a day of
+  // month is set to 31 on a 28-30 day month.
+  static bool FromUTCExploded(const Exploded& exploded,
+                              Time* time) WARN_UNUSED_RESULT {
+    return FromExploded(false, exploded, time);
+  }
+  static bool FromLocalExploded(const Exploded& exploded,
+                                Time* time) WARN_UNUSED_RESULT {
+    return FromExploded(true, exploded, time);
   }
 
   // Converts a string representation of time to a Time object.
@@ -560,8 +585,12 @@
   void Explode(bool is_local, Exploded* exploded) const;
 
   // Unexplodes a given time assuming the source is either local time
-  // |is_local = true| or UTC |is_local = false|.
-  static Time FromExploded(bool is_local, const Exploded& exploded);
+  // |is_local = true| or UTC |is_local = false|. Function returns false on
+  // failure and sets |time| to Time(0). Otherwise returns true and sets |time|
+  // to non-exploded time.
+  static bool FromExploded(bool is_local,
+                           const Exploded& exploded,
+                           Time* time) WARN_UNUSED_RESULT;
 
   // Converts a string representation of time to a Time object.
   // An example of a time string which is converted is as below:-
@@ -573,65 +602,83 @@
   static bool FromStringInternal(const char* time_string,
                                  bool is_local,
                                  Time* parsed_time);
+
+  // Comparison does not consider |day_of_week| when doing the operation.
+  static bool ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs);
 };
 
-// Inline the TimeDelta factory methods, for fast TimeDelta construction.
-
 // static
-inline TimeDelta TimeDelta::FromDays(int days) {
-  if (days == std::numeric_limits<int>::max())
-    return Max();
-  return TimeDelta(days * Time::kMicrosecondsPerDay);
+constexpr TimeDelta TimeDelta::FromDays(int days) {
+  return days == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(days * Time::kMicrosecondsPerDay);
 }
 
 // static
-inline TimeDelta TimeDelta::FromHours(int hours) {
-  if (hours == std::numeric_limits<int>::max())
-    return Max();
-  return TimeDelta(hours * Time::kMicrosecondsPerHour);
+constexpr TimeDelta TimeDelta::FromHours(int hours) {
+  return hours == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(hours * Time::kMicrosecondsPerHour);
 }
 
 // static
-inline TimeDelta TimeDelta::FromMinutes(int minutes) {
-  if (minutes == std::numeric_limits<int>::max())
-    return Max();
-  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+constexpr TimeDelta TimeDelta::FromMinutes(int minutes) {
+  return minutes == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(minutes * Time::kMicrosecondsPerMinute);
 }
 
 // static
-inline TimeDelta TimeDelta::FromSeconds(int64_t secs) {
-  return TimeDelta(secs) * Time::kMicrosecondsPerSecond;
+constexpr TimeDelta TimeDelta::FromSeconds(int64_t secs) {
+  return FromProduct(secs, Time::kMicrosecondsPerSecond);
 }
 
 // static
-inline TimeDelta TimeDelta::FromMilliseconds(int64_t ms) {
-  return TimeDelta(ms) * Time::kMicrosecondsPerMillisecond;
+constexpr TimeDelta TimeDelta::FromMilliseconds(int64_t ms) {
+  return FromProduct(ms, Time::kMicrosecondsPerMillisecond);
 }
 
 // static
-inline TimeDelta TimeDelta::FromSecondsD(double secs) {
+constexpr TimeDelta TimeDelta::FromSecondsD(double secs) {
   return FromDouble(secs * Time::kMicrosecondsPerSecond);
 }
 
 // static
-inline TimeDelta TimeDelta::FromMillisecondsD(double ms) {
+constexpr TimeDelta TimeDelta::FromMillisecondsD(double ms) {
   return FromDouble(ms * Time::kMicrosecondsPerMillisecond);
 }
 
 // static
-inline TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
+constexpr TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
   return TimeDelta(us);
 }
 
 // static
-inline TimeDelta TimeDelta::FromDouble(double value) {
-  double max_magnitude = std::numeric_limits<int64_t>::max();
-  TimeDelta delta = TimeDelta(static_cast<int64_t>(value));
-  if (value > max_magnitude)
-    delta = Max();
-  else if (value < -max_magnitude)
-    delta = -Max();
-  return delta;
+constexpr TimeDelta TimeDelta::FromDouble(double value) {
+  // TODO(crbug.com/612601): Use saturated_cast<int64_t>(value) once we sort out
+  // the Min() behavior.
+  return value > std::numeric_limits<int64_t>::max()
+             ? Max()
+             : value < -std::numeric_limits<int64_t>::max()
+                   ? -Max()
+                   : TimeDelta(static_cast<int64_t>(value));
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromProduct(int64_t value,
+                                           int64_t positive_value) {
+  return (
+#if !defined(_PREFAST_) || !defined(OS_WIN)
+          // Avoid internal compiler errors in /analyze builds with VS 2015
+          // update 3.
+          // https://connect.microsoft.com/VisualStudio/feedback/details/2870865
+          DCHECK(positive_value > 0),
+#endif
+          value > std::numeric_limits<int64_t>::max() / positive_value
+              ? Max()
+              : value < -std::numeric_limits<int64_t>::max() / positive_value
+                    ? -Max()
+                    : TimeDelta(value * positive_value));
 }
 
 // For logging use only.
@@ -642,6 +689,15 @@
 // Represents monotonically non-decreasing clock time.
 class BASE_EXPORT TimeTicks : public time_internal::TimeBase<TimeTicks> {
  public:
+  // The underlying clock used to generate new TimeTicks.
+  enum class Clock {
+    LINUX_CLOCK_MONOTONIC,
+    IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME,
+    MAC_MACH_ABSOLUTE_TIME,
+    WIN_QPC,
+    WIN_ROLLOVER_PROTECTED_TIME_GET_TIME
+  };
+
   TimeTicks() : TimeBase(0) {
   }
 
@@ -680,6 +736,11 @@
   TimeTicks SnappedToNextTick(TimeTicks tick_phase,
                               TimeDelta tick_interval) const;
 
+  // Returns an enum indicating the underlying clock being used to generate
+  // TimeTicks timestamps. This function should only be used for debugging and
+  // logging purposes.
+  static Clock GetClock();
+
 #if defined(OS_WIN)
  protected:
   typedef DWORD (*TickFunctionType)(void);
@@ -735,11 +796,18 @@
   // absolutely needed, call WaitUntilInitialized() before this method.
   static ThreadTicks Now();
 
+#if defined(OS_WIN)
+  // Similar to Now() above except this returns thread-specific CPU time for an
+  // arbitrary thread. All comments for the Now() method above apply to this
+  // method as well.
+  static ThreadTicks GetForThread(const PlatformThreadHandle& thread_handle);
+#endif
+
  private:
   friend class time_internal::TimeBase<ThreadTicks>;
 
-  // Please use Now() to create a new object. This is for internal use
-  // and testing.
+  // Please use Now() or GetForThread() to create a new object. This is for
+  // internal use and testing.
   explicit ThreadTicks(int64_t us) : TimeBase(us) {}
 
 #if defined(OS_WIN)
diff --git a/base/time/time_mac.cc b/base/time/time_mac.cc
index f2bc5ed..373ec3a 100644
--- a/base/time/time_mac.cc
+++ b/base/time/time_mac.cc
@@ -34,7 +34,7 @@
   struct timeval boottime;
   int mib[2] = {CTL_KERN, KERN_BOOTTIME};
   size_t size = sizeof(boottime);
-  int kr = sysctl(mib, arraysize(mib), &boottime, &size, NULL, 0);
+  int kr = sysctl(mib, arraysize(mib), &boottime, &size, nullptr, 0);
   DCHECK_EQ(KERN_SUCCESS, kr);
   base::TimeDelta time_difference = base::Time::Now() -
       (base::Time::FromTimeT(boottime.tv_sec) +
@@ -92,9 +92,11 @@
   MACH_DCHECK(kr == KERN_SUCCESS, kr) << "thread_info";
 
   base::CheckedNumeric<int64_t> absolute_micros(
-      thread_info_data.user_time.seconds);
+      thread_info_data.user_time.seconds +
+      thread_info_data.system_time.seconds);
   absolute_micros *= base::Time::kMicrosecondsPerSecond;
-  absolute_micros += thread_info_data.user_time.microseconds;
+  absolute_micros += (thread_info_data.user_time.microseconds +
+                      thread_info_data.system_time.microseconds);
   return absolute_micros.ValueOrDie();
 #endif  // defined(OS_IOS)
 }
@@ -166,22 +168,44 @@
 }
 
 // static
-Time Time::FromExploded(bool is_local, const Exploded& exploded) {
-  CFGregorianDate date;
-  date.second = exploded.second +
-      exploded.millisecond / static_cast<double>(kMillisecondsPerSecond);
-  date.minute = exploded.minute;
-  date.hour = exploded.hour;
-  date.day = exploded.day_of_month;
-  date.month = exploded.month;
-  date.year = exploded.year;
-
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
   base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
-      is_local ? CFTimeZoneCopySystem() : NULL);
-  CFAbsoluteTime seconds = CFGregorianDateGetAbsoluteTime(date, time_zone) +
-      kCFAbsoluteTimeIntervalSince1970;
-  return Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
-              kWindowsEpochDeltaMicroseconds);
+      is_local
+          ? CFTimeZoneCopySystem()
+          : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
+  base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+      kCFAllocatorDefault, kCFGregorianCalendar));
+  CFCalendarSetTimeZone(gregorian, time_zone);
+  CFAbsoluteTime absolute_time;
+  // 'S' is not defined in componentDesc in Apple documentation, but can be
+  // found at http://www.opensource.apple.com/source/CF/CF-855.17/CFCalendar.c
+  CFCalendarComposeAbsoluteTime(
+      gregorian, &absolute_time, "yMdHmsS", exploded.year, exploded.month,
+      exploded.day_of_month, exploded.hour, exploded.minute, exploded.second,
+      exploded.millisecond);
+  CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
+
+  base::Time converted_time =
+      Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
+           kWindowsEpochDeltaMicroseconds);
+
+  // If |exploded.day_of_month| is set to 31
+  // on a 28-30 day month, it will return the first day of the next month.
+  // Thus round-trip the time and compare the initial |exploded| with
+  // |to_exploded| time.
+  base::Time::Exploded to_exploded;
+  if (!is_local)
+    converted_time.UTCExplode(&to_exploded);
+  else
+    converted_time.LocalExplode(&to_exploded);
+
+  if (ExplodedMostlyEquals(to_exploded, exploded)) {
+    *time = converted_time;
+    return true;
+  }
+
+  *time = Time(0);
+  return false;
 }
 
 void Time::Explode(bool is_local, Exploded* exploded) const {
@@ -195,19 +219,25 @@
                            kCFAbsoluteTimeIntervalSince1970;
 
   base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
-      is_local ? CFTimeZoneCopySystem() : NULL);
-  CFGregorianDate date = CFAbsoluteTimeGetGregorianDate(seconds, time_zone);
-  // 1 = Monday, ..., 7 = Sunday.
-  int cf_day_of_week = CFAbsoluteTimeGetDayOfWeek(seconds, time_zone);
-
-  exploded->year = date.year;
-  exploded->month = date.month;
-  exploded->day_of_week = cf_day_of_week % 7;
-  exploded->day_of_month = date.day;
-  exploded->hour = date.hour;
-  exploded->minute = date.minute;
+      is_local
+          ? CFTimeZoneCopySystem()
+          : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
+  base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+      kCFAllocatorDefault, kCFGregorianCalendar));
+  CFCalendarSetTimeZone(gregorian, time_zone);
+  int second, day_of_week;
+  // 'E' sets the day of week, but is not defined in componentDesc in Apple
+  // documentation. It can be found in open source code here:
+  // http://www.opensource.apple.com/source/CF/CF-855.17/CFCalendar.c
+  CFCalendarDecomposeAbsoluteTime(gregorian, seconds, "yMdHmsE",
+                                  &exploded->year, &exploded->month,
+                                  &exploded->day_of_month, &exploded->hour,
+                                  &exploded->minute, &second, &day_of_week);
   // Make sure seconds are rounded down towards -infinity.
-  exploded->second = floor(date.second);
+  exploded->second = floor(second);
+  // |Exploded|'s convention for day of week is 0 = Sunday, i.e. different
+  // from CF's 1 = Sunday.
+  exploded->day_of_week = (day_of_week - 1) % 7;
   // Calculate milliseconds ourselves, since we rounded the |seconds|, making
   // sure to round towards -infinity.
   exploded->millisecond =
@@ -229,6 +259,15 @@
 }
 
 // static
+TimeTicks::Clock TimeTicks::GetClock() {
+#if defined(OS_IOS)
+  return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
+#else
+  return Clock::MAC_MACH_ABSOLUTE_TIME;
+#endif  // defined(OS_IOS)
+}
+
+// static
 ThreadTicks ThreadTicks::Now() {
   return ThreadTicks(ComputeThreadTicks());
 }
diff --git a/base/time/time_posix.cc b/base/time/time_posix.cc
index 4aadee6..495e249 100644
--- a/base/time/time_posix.cc
+++ b/base/time/time_posix.cc
@@ -211,7 +211,7 @@
 }
 
 // static
-Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
   struct tm timestruct;
   timestruct.tm_sec    = exploded.second;
   timestruct.tm_min    = exploded.minute;
@@ -301,8 +301,26 @@
   }
 
   // Adjust from Unix (1970) to Windows (1601) epoch.
-  return Time((milliseconds * kMicrosecondsPerMillisecond) +
-      kWindowsEpochDeltaMicroseconds);
+  base::Time converted_time =
+      Time((milliseconds * kMicrosecondsPerMillisecond) +
+           kWindowsEpochDeltaMicroseconds);
+
+  // If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
+  // return the first day of the next month. Thus round-trip the time and
+  // compare the initial |exploded| with |to_exploded| time.
+  base::Time::Exploded to_exploded;
+  if (!is_local)
+    converted_time.UTCExplode(&to_exploded);
+  else
+    converted_time.LocalExplode(&to_exploded);
+
+  if (ExplodedMostlyEquals(to_exploded, exploded)) {
+    *time = converted_time;
+    return true;
+  }
+
+  *time = Time(0);
+  return false;
 }
 
 // TimeTicks ------------------------------------------------------------------
@@ -312,6 +330,11 @@
 }
 
 // static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::LINUX_CLOCK_MONOTONIC;
+}
+
+// static
 bool TimeTicks::IsHighResolution() {
   return true;
 }
diff --git a/base/time/time_unittest.cc b/base/time/time_unittest.cc
index 8a6a7f5..4f47d56 100644
--- a/base/time/time_unittest.cc
+++ b/base/time/time_unittest.cc
@@ -21,6 +21,52 @@
 
 namespace {
 
+TEST(TimeTestOutOfBounds, FromExplodedOutOfBoundsTime) {
+  // FromUTCExploded must set time to Time(0) and return false if the day is
+  // set to 31 on a 28-30 day month. Test that |exploded| yields Time(0) for
+  // the 31st of February and the 31st of April, which the new implementation
+  // rejects.
+
+  const struct DateTestData {
+    Time::Exploded explode;
+    bool is_valid;
+  } kDateTestData[] = {
+      // 31st of February
+      {{2016, 2, 0, 31, 12, 30, 0, 0}, true},
+      // 31st of April
+      {{2016, 4, 0, 31, 8, 43, 0, 0}, true},
+      // Negative month
+      {{2016, -5, 0, 2, 4, 10, 0, 0}, false},
+      // Negative date of month
+      {{2016, 6, 0, -15, 2, 50, 0, 0}, false},
+      // Negative hours
+      {{2016, 7, 0, 10, -11, 29, 0, 0}, false},
+      // Negative minutes
+      {{2016, 3, 0, 14, 10, -29, 0, 0}, false},
+      // Negative seconds
+      {{2016, 10, 0, 25, 7, 47, -30, 0}, false},
+      // Negative milliseconds
+      {{2016, 10, 0, 25, 7, 47, 20, -500}, false},
+      // Hours are too large
+      {{2016, 7, 0, 10, 26, 29, 0, 0}, false},
+      // Minutes are too large
+      {{2016, 3, 0, 14, 10, 78, 0, 0}, false},
+      // Seconds are too large
+      {{2016, 10, 0, 25, 7, 47, 234, 0}, false},
+      // Milliseconds are too large
+      {{2016, 10, 0, 25, 6, 31, 23, 1643}, false},
+  };
+
+  for (const auto& test : kDateTestData) {
+    EXPECT_EQ(test.explode.HasValidValues(), test.is_valid);
+
+    base::Time result;
+    EXPECT_FALSE(base::Time::FromUTCExploded(test.explode, &result));
+    EXPECT_TRUE(result.is_null());
+    EXPECT_FALSE(base::Time::FromLocalExploded(test.explode, &result));
+    EXPECT_TRUE(result.is_null());
+  }
+}
+
 // Specialized test fixture allowing time strings without timezones to be
 // tested by comparing them to a known time in the local zone.
 // See also pr_time_unittests.cc
@@ -80,7 +126,8 @@
   EXPECT_EQ(tms.tm_sec, exploded.second);
 
   // Convert exploded back to the time struct.
-  Time our_time_2 = Time::FromLocalExploded(exploded);
+  Time our_time_2;
+  EXPECT_TRUE(Time::FromLocalExploded(exploded, &our_time_2));
   EXPECT_TRUE(our_time_1 == our_time_2);
 
   time_t now_t_2 = our_time_2.ToTimeT();
@@ -119,7 +166,8 @@
   Time::Exploded exploded1 = {0};
   now.UTCExplode(&exploded1);
   exploded1.millisecond = 500;
-  Time time = Time::FromUTCExploded(exploded1);
+  Time time;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded1, &time));
   Time::Exploded exploded2 = {0};
   time.UTCExplode(&exploded2);
   EXPECT_EQ(exploded1.millisecond, exploded2.millisecond);
@@ -137,7 +185,8 @@
   Time::Exploded exploded;
   a.LocalExplode(&exploded);
 
-  Time b = Time::FromLocalExploded(exploded);
+  Time b;
+  EXPECT_TRUE(Time::FromLocalExploded(exploded, &b));
 
   // The exploded structure doesn't have microseconds, and on Mac & Linux, the
   // internal OS conversion uses seconds, which will cause truncation. So we
@@ -150,7 +199,8 @@
   Time::Exploded exploded;
   a.UTCExplode(&exploded);
 
-  Time b = Time::FromUTCExploded(exploded);
+  Time b;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded, &b));
   EXPECT_TRUE((a - b) < TimeDelta::FromSeconds(1));
 }
 
@@ -565,7 +615,8 @@
   static char buffer[] = "TZ=America/Santiago";
   putenv(buffer);
   tzset();
-  Time t = Time::FromLocalExploded(midnight);
+  Time t;
+  EXPECT_TRUE(Time::FromLocalExploded(midnight, &t));
   EXPECT_EQ(1381633200, t.ToTimeT());
 }
 #endif  // OS_ANDROID
@@ -723,16 +774,21 @@
 }
 
 TEST(TimeDelta, FromAndIn) {
-  EXPECT_TRUE(TimeDelta::FromDays(2) == TimeDelta::FromHours(48));
-  EXPECT_TRUE(TimeDelta::FromHours(3) == TimeDelta::FromMinutes(180));
-  EXPECT_TRUE(TimeDelta::FromMinutes(2) == TimeDelta::FromSeconds(120));
-  EXPECT_TRUE(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000));
-  EXPECT_TRUE(TimeDelta::FromMilliseconds(2) ==
-              TimeDelta::FromMicroseconds(2000));
-  EXPECT_TRUE(TimeDelta::FromSecondsD(2.3) ==
-              TimeDelta::FromMilliseconds(2300));
-  EXPECT_TRUE(TimeDelta::FromMillisecondsD(2.5) ==
-              TimeDelta::FromMicroseconds(2500));
+  // static_assert also checks that the contained expression is a constant
+  // expression, meaning all its components are suitable for initializing global
+  // variables.
+  static_assert(TimeDelta::FromDays(2) == TimeDelta::FromHours(48), "");
+  static_assert(TimeDelta::FromHours(3) == TimeDelta::FromMinutes(180), "");
+  static_assert(TimeDelta::FromMinutes(2) == TimeDelta::FromSeconds(120), "");
+  static_assert(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000),
+                "");
+  static_assert(
+      TimeDelta::FromMilliseconds(2) == TimeDelta::FromMicroseconds(2000), "");
+  static_assert(
+      TimeDelta::FromSecondsD(2.3) == TimeDelta::FromMilliseconds(2300), "");
+  static_assert(
+      TimeDelta::FromMillisecondsD(2.5) == TimeDelta::FromMicroseconds(2500),
+      "");
   EXPECT_EQ(13, TimeDelta::FromDays(13).InDays());
   EXPECT_EQ(13, TimeDelta::FromHours(13).InHours());
   EXPECT_EQ(13, TimeDelta::FromMinutes(13).InMinutes());
@@ -782,7 +838,8 @@
   exploded.minute = 0;
   exploded.second = 0;
   exploded.millisecond = 0;
-  Time t = Time::FromUTCExploded(exploded);
+  Time t;
+  EXPECT_TRUE(Time::FromUTCExploded(exploded, &t));
   // Unix 1970 epoch.
   EXPECT_EQ(INT64_C(11644473600000000), t.ToInternalValue());
 
diff --git a/base/timer/hi_res_timer_manager_unittest.cc b/base/timer/hi_res_timer_manager_unittest.cc
index 9416048..a0b0f93 100644
--- a/base/timer/hi_res_timer_manager_unittest.cc
+++ b/base/timer/hi_res_timer_manager_unittest.cc
@@ -4,9 +4,9 @@
 
 #include "base/timer/hi_res_timer_manager.h"
 
+#include <memory>
 #include <utility>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/power_monitor/power_monitor.h"
 #include "base/power_monitor/power_monitor_device_source.h"
@@ -22,9 +22,9 @@
   // Windows, which makes this test flaky if you run while the machine
   // goes in or out of AC power.
   base::MessageLoop loop(base::MessageLoop::TYPE_UI);
-  scoped_ptr<base::PowerMonitorSource> power_monitor_source(
+  std::unique_ptr<base::PowerMonitorSource> power_monitor_source(
       new base::PowerMonitorDeviceSource());
-  scoped_ptr<base::PowerMonitor> power_monitor(
+  std::unique_ptr<base::PowerMonitor> power_monitor(
       new base::PowerMonitor(std::move(power_monitor_source)));
 
   HighResolutionTimerManager manager;
diff --git a/base/timer/timer.cc b/base/timer/timer.cc
index fa6b8cd..e554905 100644
--- a/base/timer/timer.cc
+++ b/base/timer/timer.cc
@@ -9,8 +9,8 @@
 #include "base/logging.h"
 #include "base/memory/ref_counted.h"
 #include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
diff --git a/base/timer/timer_unittest.cc b/base/timer/timer_unittest.cc
index b1d3c3e..6fcd25b 100644
--- a/base/timer/timer_unittest.cc
+++ b/base/timer/timer_unittest.cc
@@ -2,13 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/timer/timer.h"
+
 #include <stddef.h>
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
 #include "base/test/test_simple_task_runner.h"
-#include "base/timer/timer.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -78,7 +82,7 @@
   }
 
   bool* did_run_;
-  scoped_ptr<base::OneShotTimer> timer_;
+  std::unique_ptr<base::OneShotTimer> timer_;
 };
 
 class RepeatingTimerTester {
@@ -113,7 +117,7 @@
   OneShotTimerTester f(&did_run);
   f.Start();
 
-  base::MessageLoop::current()->Run();
+  base::RunLoop().Run();
 
   EXPECT_TRUE(did_run);
 }
@@ -125,7 +129,7 @@
   OneShotTimerTester* a = new OneShotTimerTester(&did_run_a);
 
   // This should run before the timer expires.
-  base::MessageLoop::current()->DeleteSoon(FROM_HERE, a);
+  base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
 
   // Now start the timer.
   a->Start();
@@ -134,7 +138,7 @@
   OneShotTimerTester b(&did_run_b);
   b.Start();
 
-  base::MessageLoop::current()->Run();
+  base::RunLoop().Run();
 
   EXPECT_FALSE(did_run_a);
   EXPECT_TRUE(did_run_b);
@@ -148,7 +152,7 @@
   OneShotSelfDeletingTimerTester f(&did_run);
   f.Start();
 
-  base::MessageLoop::current()->Run();
+  base::RunLoop().Run();
 
   EXPECT_TRUE(did_run);
 }
@@ -161,7 +165,7 @@
   RepeatingTimerTester f(&did_run, delay);
   f.Start();
 
-  base::MessageLoop::current()->Run();
+  base::RunLoop().Run();
 
   EXPECT_TRUE(did_run);
 }
@@ -174,7 +178,7 @@
   RepeatingTimerTester* a = new RepeatingTimerTester(&did_run_a, delay);
 
   // This should run before the timer expires.
-  base::MessageLoop::current()->DeleteSoon(FROM_HERE, a);
+  base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
 
   // Now start the timer.
   a->Start();
@@ -183,7 +187,7 @@
   RepeatingTimerTester b(&did_run_b, delay);
   b.Start();
 
-  base::MessageLoop::current()->Run();
+  base::RunLoop().Run();
 
   EXPECT_FALSE(did_run_a);
   EXPECT_TRUE(did_run_b);
@@ -213,7 +217,7 @@
   bool did_run = false;
   OneShotTimerTester tester(&did_run);
   tester.Start();
-  base::MessageLoop::current()->Run();
+  base::RunLoop().Run();
 
   ASSERT_FALSE(target.signaled());
 }
@@ -229,7 +233,7 @@
   bool did_run = false;
   OneShotTimerTester tester(&did_run, 100 /* milliseconds */);
   tester.Start();
-  base::MessageLoop::current()->Run();
+  base::RunLoop().Run();
 
   ASSERT_TRUE(target.signaled());
 }
@@ -268,7 +272,7 @@
   bool did_run = false;
   OneShotTimerTester tester(&did_run, 300);
   tester.Start();
-  base::MessageLoop::current()->Run();
+  base::RunLoop().Run();
 
   ASSERT_TRUE(target.signaled());
 }
@@ -511,7 +515,7 @@
     timer.Stop();
     timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(40),
                 base::Bind(&SetCallbackHappened2));
-    base::MessageLoop::current()->Run();
+    base::RunLoop().Run();
     EXPECT_FALSE(g_callback_happened1);
     EXPECT_TRUE(g_callback_happened2);
   }
@@ -527,7 +531,7 @@
     timer.Reset();
     // Since Reset happened before task ran, the user_task must not be cleared:
     ASSERT_FALSE(timer.user_task().is_null());
-    base::MessageLoop::current()->Run();
+    base::RunLoop().Run();
     EXPECT_TRUE(g_callback_happened1);
   }
 }
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
index a266cd5..0a04d62 100644
--- a/base/trace_event/common/trace_event_common.h
+++ b/base/trace_event/common/trace_event_common.h
@@ -156,7 +156,7 @@
 //   };
 //
 //   TRACE_EVENT1("foo", "bar", "data",
-//                scoped_refptr<ConvertableToTraceFormat>(new MyData()));
+//                std::unique_ptr<ConvertableToTraceFormat>(new MyData()));
 //
 // The trace framework will take ownership if the passed pointer and it will
 // be free'd when the trace buffer is flushed.
@@ -926,6 +926,16 @@
                                    name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
                                    arg1_val, arg2_name, arg2_val)
 
+// Special trace event macro to trace task execution with the location where it
+// was posted from.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+  INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
+
+// TRACE_EVENT_METADATA* events are information related to other
+// injected events, not events in their own right.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
 // Records a clock sync event.
 #define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id)                               \
   INTERNAL_TRACE_EVENT_ADD(                                                    \
@@ -962,6 +972,21 @@
       TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name,         \
       TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
 
+// Records entering and leaving trace event contexts. |category_group| and
+// |name| specify the context category and type. |context| is a
+// snapshotted context object id.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                              \
+      TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name,     \
+      TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                              \
+      TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name,     \
+      TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name,       \
+                                      TRACE_ID_DONT_MANGLE(context))
+
 // Macro to efficiently determine if a given category group is enabled.
 #define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret)             \
   do {                                                                      \
@@ -1025,6 +1050,8 @@
 #define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
 #define TRACE_EVENT_PHASE_MARK ('R')
 #define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
+#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
+#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
 
 // Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
 #define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
diff --git a/base/trace_event/etw_manifest/BUILD.gn b/base/trace_event/etw_manifest/BUILD.gn
index 1e16672..19c4ecf 100644
--- a/base/trace_event/etw_manifest/BUILD.gn
+++ b/base/trace_event/etw_manifest/BUILD.gn
@@ -18,8 +18,12 @@
 
   user_mode_logging = true
 
-  # TOOD(brucedawson) bug 569989: Enable ETW manifest and compile and link it
-  # into the proper places. Enabling as-is may add the resources to too many
-  # targets. See the bug for more information.
+  # The only code generated from chrome_events_win.man is a header file that
+  # is included by trace_event_etw_export_win.cc, so there is no need to
+  # compile any generated code. The other thing which compile_generated_code
+  # controls in this context is linking in the .res file generated from the
+  # manifest. However this is only needed for ETW provider registration which
+  # is done by UIforETW (https://github.com/google/UIforETW) and therefore the
+  # manifest resource can be skipped in Chrome.
   compile_generated_code = false
 }
diff --git a/base/trace_event/heap_profiler.h b/base/trace_event/heap_profiler.h
new file mode 100644
index 0000000..cf57524
--- /dev/null
+++ b/base/trace_event/heap_profiler.h
@@ -0,0 +1,89 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_H
+#define BASE_TRACE_EVENT_HEAP_PROFILER_H
+
+#include "base/compiler_specific.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+// This header file defines the set of macros that are used to track memory
+// usage in the heap profiler. This is in addition to the macros defined in
+// trace_event.h and are specific to heap profiler. This file also defines
+// implementation details of these macros.
+
+// Implementation detail: heap profiler macros create temporary variables to
+// keep instrumentation overhead low. These macros give each temporary variable
+// a unique name based on the line number to prevent name collisions.
+#define INTERNAL_HEAP_PROFILER_UID3(a, b) heap_profiler_unique_##a##b
+#define INTERNAL_HEAP_PROFILER_UID2(a, b) INTERNAL_HEAP_PROFILER_UID3(a, b)
+#define INTERNAL_HEAP_PROFILER_UID(name_prefix) \
+  INTERNAL_HEAP_PROFILER_UID2(name_prefix, __LINE__)
+
+// Scoped tracker for task execution context in the heap profiler.
+#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
+  trace_event_internal::HeapProfilerScopedTaskExecutionTracker
+
+// A scoped ignore event used to tell heap profiler to ignore all the
+// allocations in the scope. It is useful to exclude allocations made for
+// tracing from the heap profiler dumps.
+#define HEAP_PROFILER_SCOPED_IGNORE                                          \
+  trace_event_internal::HeapProfilerScopedIgnore INTERNAL_HEAP_PROFILER_UID( \
+      scoped_ignore)
+
+namespace trace_event_internal {
+
+// HeapProfilerScopedTaskExecutionTracker records the current task's context in
+// the heap profiler.
+class HeapProfilerScopedTaskExecutionTracker {
+ public:
+  inline explicit HeapProfilerScopedTaskExecutionTracker(
+      const char* task_context)
+      : context_(task_context) {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(AllocationContextTracker::capture_mode() !=
+                 AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PushCurrentTaskContext(context_);
+    }
+  }
+
+  inline ~HeapProfilerScopedTaskExecutionTracker() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(AllocationContextTracker::capture_mode() !=
+                 AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopCurrentTaskContext(context_);
+    }
+  }
+
+ private:
+  const char* context_;
+};
+
+class BASE_EXPORT HeapProfilerScopedIgnore {
+ public:
+  inline HeapProfilerScopedIgnore() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(
+            AllocationContextTracker::capture_mode() !=
+            AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->begin_ignore_scope();
+    }
+  }
+  inline ~HeapProfilerScopedIgnore() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(
+            AllocationContextTracker::capture_mode() !=
+            AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->end_ignore_scope();
+    }
+  }
+};
+
+}  // namespace trace_event_internal
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_H
diff --git a/base/trace_event/heap_profiler_allocation_context.cc b/base/trace_event/heap_profiler_allocation_context.cc
index dcef5bd..0f330a8 100644
--- a/base/trace_event/heap_profiler_allocation_context.cc
+++ b/base/trace_event/heap_profiler_allocation_context.cc
@@ -12,42 +12,62 @@
 namespace base {
 namespace trace_event {
 
-// Constructor that does not initialize members.
-AllocationContext::AllocationContext() {}
-
-// static
-AllocationContext AllocationContext::Empty() {
-  AllocationContext ctx;
-
-  for (size_t i = 0; i < arraysize(ctx.backtrace.frames); i++)
-    ctx.backtrace.frames[i] = nullptr;
-
-  ctx.type_name = nullptr;
-
-  return ctx;
+bool operator < (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value < rhs.value;
 }
 
+bool operator == (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value == rhs.value;
+}
+
+bool operator != (const StackFrame& lhs, const StackFrame& rhs) {
+  return !(lhs.value == rhs.value);
+}
+
+Backtrace::Backtrace(): frame_count(0) {}
+
 bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
-  // Pointer equality of the stack frames is assumed, so instead of doing a deep
-  // string comparison on all of the frames, a |memcmp| suffices.
-  return std::memcmp(lhs.frames, rhs.frames, sizeof(lhs.frames)) == 0;
+  if (lhs.frame_count != rhs.frame_count) return false;
+  return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
 }
 
+bool operator!=(const Backtrace& lhs, const Backtrace& rhs) {
+  return !(lhs == rhs);
+}
+
+AllocationContext::AllocationContext(): type_name(nullptr) {}
+
+AllocationContext::AllocationContext(const Backtrace& backtrace,
+                                     const char* type_name)
+  : backtrace(backtrace), type_name(type_name) {}
+
 bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
   return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
 }
 
+bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) {
+  return !(lhs == rhs);
+}
 }  // namespace trace_event
 }  // namespace base
 
 namespace BASE_HASH_NAMESPACE {
 using base::trace_event::AllocationContext;
 using base::trace_event::Backtrace;
+using base::trace_event::StackFrame;
+
+size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
+  return hash<const void*>()(frame.value);
+}
 
 size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
-  return base::Hash(
-    std::string(reinterpret_cast<const char*>(backtrace.frames),
-                sizeof(backtrace.frames)));
+  const void* values[Backtrace::kMaxFrameCount];
+  for (size_t i = 0; i != backtrace.frame_count; ++i) {
+    values[i] = backtrace.frames[i].value;
+  }
+  return base::SuperFastHash(
+      reinterpret_cast<const char*>(values),
+      static_cast<int>(backtrace.frame_count * sizeof(*values)));
 }
 
 size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
diff --git a/base/trace_event/heap_profiler_allocation_context.h b/base/trace_event/heap_profiler_allocation_context.h
index 8544c78..24e2dec 100644
--- a/base/trace_event/heap_profiler_allocation_context.h
+++ b/base/trace_event/heap_profiler_allocation_context.h
@@ -29,32 +29,62 @@
 // memory used for tracing and accuracy. Measurements done on a prototype
 // revealed that:
 //
-// - In 60 percent of the cases, stack depth <= 7.
-// - In 87 percent of the cases, stack depth <= 9.
-// - In 95 percent of the cases, stack depth <= 11.
+// - In 60 percent of the cases, pseudo stack depth <= 7.
+// - In 87 percent of the cases, pseudo stack depth <= 9.
+// - In 95 percent of the cases, pseudo stack depth <= 11.
 //
 // See the design doc (https://goo.gl/4s7v7b) for more details.
 
-using StackFrame = const char*;
+// Represents (pseudo) stack frame. Used in Backtrace class below.
+//
+// Conceptually stack frame is identified by its value, and type is used
+// mostly to properly format the value. Value is expected to be a valid
+// pointer from process' address space.
+struct BASE_EXPORT StackFrame {
+  enum class Type {
+    TRACE_EVENT_NAME,   // const char* string
+    THREAD_NAME,        // const char* thread name
+    PROGRAM_COUNTER,    // as returned by stack tracing (e.g. by StackTrace)
+  };
+
+  static StackFrame FromTraceEventName(const char* name) {
+    return {Type::TRACE_EVENT_NAME, name};
+  }
+  static StackFrame FromThreadName(const char* name) {
+    return {Type::THREAD_NAME, name};
+  }
+  static StackFrame FromProgramCounter(const void* pc) {
+    return {Type::PROGRAM_COUNTER, pc};
+  }
+
+  Type type;
+  const void* value;
+};
+
+bool BASE_EXPORT operator < (const StackFrame& lhs, const StackFrame& rhs);
+bool BASE_EXPORT operator == (const StackFrame& lhs, const StackFrame& rhs);
+bool BASE_EXPORT operator != (const StackFrame& lhs, const StackFrame& rhs);
 
 struct BASE_EXPORT Backtrace {
-  // Unused backtrace frames are filled with nullptr frames. If the stack is
-  // higher than what can be stored here, the bottom frames are stored. Based
-  // on the data above, a depth of 12 captures the full stack in the vast
-  // majority of the cases.
-  StackFrame frames[12];
+  Backtrace();
+
+  // If the stack is higher than what can be stored here, the bottom frames
+  // (the ones closer to main()) are stored. Depth of 12 is enough for most
+  // pseudo traces (see above), but not for native traces, where we need more.
+  enum { kMaxFrameCount = 48 };
+  StackFrame frames[kMaxFrameCount];
+  size_t frame_count;
 };
 
 bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
+bool BASE_EXPORT operator!=(const Backtrace& lhs, const Backtrace& rhs);
 
 // The |AllocationContext| is context metadata that is kept for every allocation
 // when heap profiling is enabled. To simplify memory management for book-
-// keeping, this struct has a fixed size. All |const char*|s here must have
-// static lifetime.
+// keeping, this struct has a fixed size.
 struct BASE_EXPORT AllocationContext {
- public:
-  // An allocation context with empty backtrace and unknown type.
-  static AllocationContext Empty();
+  AllocationContext();
+  AllocationContext(const Backtrace& backtrace, const char* type_name);
 
   Backtrace backtrace;
 
@@ -63,19 +93,18 @@
   // deep string comparison. In a component build, where a type name can have a
   // string literal in several dynamic libraries, this may distort grouping.
   const char* type_name;
-
- private:
-  friend class AllocationContextTracker;
-
-  // Don't allow uninitialized instances except inside the allocation context
-  // tracker. Except in tests, an |AllocationContext| should only be obtained
-  // from the tracker. In tests, paying the overhead of initializing the struct
-  // to |Empty| and then overwriting the members is not such a big deal.
-  AllocationContext();
 };
 
 bool BASE_EXPORT operator==(const AllocationContext& lhs,
                             const AllocationContext& rhs);
+bool BASE_EXPORT operator!=(const AllocationContext& lhs,
+                            const AllocationContext& rhs);
+
+// Struct to store the size and count of the allocations.
+struct AllocationMetrics {
+  size_t size;
+  size_t count;
+};
 
 }  // namespace trace_event
 }  // namespace base
@@ -83,6 +112,11 @@
 namespace BASE_HASH_NAMESPACE {
 
 template <>
+struct BASE_EXPORT hash<base::trace_event::StackFrame> {
+  size_t operator()(const base::trace_event::StackFrame& frame) const;
+};
+
+template <>
 struct BASE_EXPORT hash<base::trace_event::Backtrace> {
   size_t operator()(const base::trace_event::Backtrace& backtrace) const;
 };
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.cc b/base/trace_event/heap_profiler_allocation_context_tracker.cc
index 791ab7a..31f311a 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -8,16 +8,29 @@
 #include <iterator>
 
 #include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/threading/platform_thread.h"
 #include "base/threading/thread_local_storage.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <sys/prctl.h>
+#endif
+
 namespace base {
 namespace trace_event {
 
-subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0;
+subtle::Atomic32 AllocationContextTracker::capture_mode_ =
+    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);
 
 namespace {
 
+const size_t kMaxStackDepth = 128u;
+const size_t kMaxTaskDepth = 16u;
+AllocationContextTracker* const kInitializingSentinel =
+    reinterpret_cast<AllocationContextTracker*>(-1);
+const char kTracingOverhead[] = "tracing_overhead";
+
 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
 
 // This function is added to the TLS slot to clean up the instance when the
@@ -26,17 +39,41 @@
   delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
 }
 
+// Cannot call ThreadIdNameManager::GetName because it holds a lock and causes
+// deadlock when lock is already held by ThreadIdNameManager before the current
+// allocation. Gets the thread name from kernel if available or returns a string
+// with id. This function intentionally leaks the allocated strings since they
+// are used to tag allocations even after the thread dies.
+const char* GetAndLeakThreadName() {
+  char name[16];
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // If the thread name is not set, try to get it from prctl. Thread name might
+  // not be set in cases where the thread started before heap profiling was
+  // enabled.
+  int err = prctl(PR_GET_NAME, name);
+  if (!err) {
+    return strdup(name);
+  }
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+  // Use tid if we don't have a thread name.
+  snprintf(name, sizeof(name), "%lu",
+           static_cast<unsigned long>(PlatformThread::CurrentId()));
+  return strdup(name);
+}
+
 }  // namespace
 
-AllocationContextTracker::AllocationContextTracker() {}
-AllocationContextTracker::~AllocationContextTracker() {}
-
 // static
-AllocationContextTracker* AllocationContextTracker::GetThreadLocalTracker() {
-  auto tracker =
+AllocationContextTracker*
+AllocationContextTracker::GetInstanceForCurrentThread() {
+  AllocationContextTracker* tracker =
       static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
+  if (tracker == kInitializingSentinel)
+    return nullptr;  // Re-entrancy case.
 
   if (!tracker) {
+    g_tls_alloc_ctx_tracker.Set(kInitializingSentinel);
     tracker = new AllocationContextTracker();
     g_tls_alloc_ctx_tracker.Set(tracker);
   }
@@ -44,69 +81,163 @@
   return tracker;
 }
 
+AllocationContextTracker::AllocationContextTracker()
+    : thread_name_(nullptr), ignore_scope_depth_(0) {
+  pseudo_stack_.reserve(kMaxStackDepth);
+  task_contexts_.reserve(kMaxTaskDepth);
+}
+AllocationContextTracker::~AllocationContextTracker() {}
+
 // static
-void AllocationContextTracker::SetCaptureEnabled(bool enabled) {
+void AllocationContextTracker::SetCurrentThreadName(const char* name) {
+  if (name && capture_mode() != CaptureMode::DISABLED) {
+    GetInstanceForCurrentThread()->thread_name_ = name;
+  }
+}
+
+// static
+void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
   // When enabling capturing, also initialize the TLS slot. This does not create
   // a TLS instance yet.
-  if (enabled && !g_tls_alloc_ctx_tracker.initialized())
+  if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
     g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);
 
-  // Release ordering ensures that when a thread observes |capture_enabled_| to
+  // Release ordering ensures that when a thread observes |capture_mode_| to
   // be true through an acquire load, the TLS slot has been initialized.
-  subtle::Release_Store(&capture_enabled_, enabled);
+  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
 }
 
-// static
-void AllocationContextTracker::PushPseudoStackFrame(StackFrame frame) {
-  auto tracker = AllocationContextTracker::GetThreadLocalTracker();
-
+void AllocationContextTracker::PushPseudoStackFrame(
+    const char* trace_event_name) {
   // Impose a limit on the height to verify that every push is popped, because
   // in practice the pseudo stack never grows higher than ~20 frames.
-  DCHECK_LT(tracker->pseudo_stack_.size(), 128u);
-  tracker->pseudo_stack_.push_back(frame);
+  if (pseudo_stack_.size() < kMaxStackDepth)
+    pseudo_stack_.push_back(trace_event_name);
+  else
+    NOTREACHED();
 }
 
-// static
-void AllocationContextTracker::PopPseudoStackFrame(StackFrame frame) {
-  auto tracker = AllocationContextTracker::GetThreadLocalTracker();
-
+void AllocationContextTracker::PopPseudoStackFrame(
+    const char* trace_event_name) {
   // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
   // scope, the frame was never pushed, so it is possible that pop is called
   // on an empty stack.
-  if (tracker->pseudo_stack_.empty())
+  if (pseudo_stack_.empty())
     return;
 
   // Assert that pushes and pops are nested correctly. This DCHECK can be
   // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
   // without a corresponding TRACE_EVENT_BEGIN).
-  DCHECK_EQ(frame, tracker->pseudo_stack_.back())
+  DCHECK_EQ(trace_event_name, pseudo_stack_.back())
       << "Encountered an unmatched TRACE_EVENT_END";
 
-  tracker->pseudo_stack_.pop_back();
+  pseudo_stack_.pop_back();
+}
+
+void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
+  DCHECK(context);
+  if (task_contexts_.size() < kMaxTaskDepth)
+    task_contexts_.push_back(context);
+  else
+    NOTREACHED();
+}
+
+void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
+  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
+  // scope, the context was never pushed, so it is possible that pop is called
+  // on an empty stack.
+  if (task_contexts_.empty())
+    return;
+
+  DCHECK_EQ(context, task_contexts_.back())
+      << "Encountered an unmatched context end";
+  task_contexts_.pop_back();
 }
 
 // static
 AllocationContext AllocationContextTracker::GetContextSnapshot() {
-  AllocationContextTracker* tracker = GetThreadLocalTracker();
   AllocationContext ctx;
 
-  // Fill the backtrace.
-  {
-    auto src = tracker->pseudo_stack_.begin();
-    auto dst = std::begin(ctx.backtrace.frames);
-    auto src_end = tracker->pseudo_stack_.end();
-    auto dst_end = std::end(ctx.backtrace.frames);
-
-    // Copy as much of the bottom of the pseudo stack into the backtrace as
-    // possible.
-    for (; src != src_end && dst != dst_end; src++, dst++)
-      *dst = *src;
-
-    // If there is room for more, fill the remaining slots with empty frames.
-    std::fill(dst, dst_end, nullptr);
+  if (ignore_scope_depth_) {
+    ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
+    ctx.type_name = kTracingOverhead;
+    ctx.backtrace.frame_count = 1;
+    return ctx;
   }
 
-  ctx.type_name = nullptr;
+  CaptureMode mode = static_cast<CaptureMode>(
+      subtle::NoBarrier_Load(&capture_mode_));
+
+  auto* backtrace = std::begin(ctx.backtrace.frames);
+  auto* backtrace_end = std::end(ctx.backtrace.frames);
+
+  if (!thread_name_) {
+    // Ignore the string allocation made by GetAndLeakThreadName to avoid
+    // reentrancy.
+    ignore_scope_depth_++;
+    thread_name_ = GetAndLeakThreadName();
+    ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
+    DCHECK(thread_name_);
+    ignore_scope_depth_--;
+  }
+
+  // Add the thread name as the first entry in pseudo stack.
+  if (thread_name_) {
+    *backtrace++ = StackFrame::FromThreadName(thread_name_);
+  }
+
+  switch (mode) {
+    case CaptureMode::DISABLED:
+      {
+        break;
+      }
+    case CaptureMode::PSEUDO_STACK:
+      {
+        for (const char* event_name: pseudo_stack_) {
+          if (backtrace == backtrace_end) {
+            break;
+          }
+          *backtrace++ = StackFrame::FromTraceEventName(event_name);
+        }
+        break;
+      }
+    case CaptureMode::NATIVE_STACK:
+      {
+        // Backtrace contract requires us to return bottom frames, i.e.
+        // from main() and up. Stack unwinding produces top frames, i.e.
+        // from this point and up until main(). We request many frames to
+        // make sure we reach main(), and then copy bottom portion of them.
+        const void* frames[128];
+        static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
+                      "not requesting enough frames to fill Backtrace");
+#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL)
+        size_t frame_count = debug::TraceStackFramePointers(
+            frames,
+            arraysize(frames),
+            1 /* exclude this function from the trace */ );
+#else
+        size_t frame_count = 0;
+        NOTREACHED();
+#endif
+
+        // Copy frames backwards
+        size_t backtrace_capacity = backtrace_end - backtrace;
+        size_t top_frame_index = (backtrace_capacity >= frame_count) ?
+            0 :
+            frame_count - backtrace_capacity;
+        for (size_t i = frame_count; i > top_frame_index;) {
+          const void* frame = frames[--i];
+          *backtrace++ = StackFrame::FromProgramCounter(frame);
+        }
+        break;
+      }
+  }
+
+  ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
+
+  // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
+  // (component name) in the heap profiler and not piggy back on the type name.
+  ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();
 
   return ctx;
 }
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.h b/base/trace_event/heap_profiler_allocation_context_tracker.h
index 9c9a313..454200c 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -9,6 +9,7 @@
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
+#include "base/debug/stack_trace.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
@@ -23,46 +24,82 @@
 // details.
 class BASE_EXPORT AllocationContextTracker {
  public:
-  // Globally enables capturing allocation context.
-  // TODO(ruuda): Should this be replaced by |EnableCapturing| in the future?
-  // Or at least have something that guards agains enable -> disable -> enable?
-  static void SetCaptureEnabled(bool enabled);
+  enum class CaptureMode: int32_t {
+    DISABLED,       // Don't capture anything
+    PSEUDO_STACK,   // GetContextSnapshot() returns pseudo stack trace
+    NATIVE_STACK    // GetContextSnapshot() returns native (real) stack trace
+  };
 
-  // Returns whether capturing allocation context is enabled globally.
-  inline static bool capture_enabled() {
+  // Globally sets capturing mode.
+  // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
+  static void SetCaptureMode(CaptureMode mode);
+
+  // Returns global capturing mode.
+  inline static CaptureMode capture_mode() {
     // A little lag after heap profiling is enabled or disabled is fine, it is
     // more important that the check is as cheap as possible when capturing is
     // not enabled, so do not issue a memory barrier in the fast path.
-    if (subtle::NoBarrier_Load(&capture_enabled_) == 0)
-      return false;
+    if (subtle::NoBarrier_Load(&capture_mode_) ==
+            static_cast<int32_t>(CaptureMode::DISABLED))
+      return CaptureMode::DISABLED;
 
     // In the slow path, an acquire load is required to pair with the release
-    // store in |SetCaptureEnabled|. This is to ensure that the TLS slot for
+    // store in |SetCaptureMode|. This is to ensure that the TLS slot for
     // the thread-local allocation context tracker has been initialized if
-    // |capture_enabled| returns true.
-    return subtle::Acquire_Load(&capture_enabled_) != 0;
+    // |capture_mode| returns something other than DISABLED.
+    return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
+  }
+
+  // Returns the thread-local instance, creating one if necessary. Returns
+  // always a valid instance, unless it is called re-entrantly, in which case
+  // returns nullptr in the nested calls.
+  static AllocationContextTracker* GetInstanceForCurrentThread();
+
+  // Set the thread name in the AllocationContextTracker of the current thread
+  // if capture is enabled.
+  static void SetCurrentThreadName(const char* name);
+
+  // Starts and ends a new ignore scope between which the allocations are
+  // ignored in the heap profiler. A dummy context that short circuits to
+  // "tracing_overhead" is returned for these allocations.
+  void begin_ignore_scope() { ignore_scope_depth_++; }
+  void end_ignore_scope() {
+    if (ignore_scope_depth_)
+      ignore_scope_depth_--;
   }
 
   // Pushes a frame onto the thread-local pseudo stack.
-  static void PushPseudoStackFrame(StackFrame frame);
+  void PushPseudoStackFrame(const char* trace_event_name);
 
   // Pops a frame from the thread-local pseudo stack.
-  static void PopPseudoStackFrame(StackFrame frame);
+  void PopPseudoStackFrame(const char* trace_event_name);
+
+  // Push and pop current task's context. A stack is used to support nested
+  // tasks and the top of the stack will be used in allocation context.
+  void PushCurrentTaskContext(const char* context);
+  void PopCurrentTaskContext(const char* context);
 
   // Returns a snapshot of the current thread-local context.
-  static AllocationContext GetContextSnapshot();
+  AllocationContext GetContextSnapshot();
 
   ~AllocationContextTracker();
 
  private:
   AllocationContextTracker();
 
-  static AllocationContextTracker* GetThreadLocalTracker();
-
-  static subtle::Atomic32 capture_enabled_;
+  static subtle::Atomic32 capture_mode_;
 
   // The pseudo stack where frames are |TRACE_EVENT| names.
-  std::vector<StackFrame> pseudo_stack_;
+  std::vector<const char*> pseudo_stack_;
+
+  // The thread name is used as the first entry in the pseudo stack.
+  const char* thread_name_;
+
+  // Stack of tasks' contexts. Context serves as a different dimension than
+  // pseudo stack to cluster allocations.
+  std::vector<const char*> task_contexts_;
+
+  uint32_t ignore_scope_depth_;
 
   DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker);
 };
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 58255ad..3064a6a 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -7,6 +7,8 @@
 #include <iterator>
 
 #include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/trace_event.h"
@@ -17,6 +19,7 @@
 
 // Define all strings once, because the pseudo stack requires pointer equality,
 // and string interning is unreliable.
+const char kThreadName[] = "TestThread";
 const char kCupcake[] = "Cupcake";
 const char kDonut[] = "Donut";
 const char kEclair[] = "Eclair";
@@ -27,10 +30,12 @@
 // in |AllocationContextTracker::GetContextSnapshot|.
 template <size_t N>
 void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
-  AllocationContext ctx = AllocationContextTracker::GetContextSnapshot();
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
 
-  auto actual = std::begin(ctx.backtrace.frames);
-  auto actual_bottom = std::end(ctx.backtrace.frames);
+  auto* actual = std::begin(ctx.backtrace.frames);
+  auto* actual_bottom = actual + ctx.backtrace.frame_count;
   auto expected = std::begin(expected_backtrace);
   auto expected_bottom = std::end(expected_backtrace);
 
@@ -45,11 +50,14 @@
   ASSERT_EQ(expected, expected_bottom);
 }
 
-void AssertBacktraceEmpty() {
-  AllocationContext ctx = AllocationContextTracker::GetContextSnapshot();
+void AssertBacktraceContainsOnlyThreadName() {
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
 
-  for (StackFrame frame : ctx.backtrace.frames)
-    ASSERT_EQ(nullptr, frame);
+  ASSERT_EQ(1u, ctx.backtrace.frame_count);
+  ASSERT_EQ(t, ctx.backtrace.frames[0]);
 }
 
 class AllocationContextTrackerTest : public testing::Test {
@@ -57,34 +65,36 @@
   void SetUp() override {
     TraceConfig config("");
     TraceLog::GetInstance()->SetEnabled(config, TraceLog::RECORDING_MODE);
-    AllocationContextTracker::SetCaptureEnabled(true);
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+    AllocationContextTracker::SetCurrentThreadName(kThreadName);
   }
 
   void TearDown() override {
-    AllocationContextTracker::SetCaptureEnabled(false);
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::DISABLED);
     TraceLog::GetInstance()->SetDisabled();
   }
 };
 
 // Check that |TRACE_EVENT| macros push and pop to the pseudo stack correctly.
-// Also check that |GetContextSnapshot| fills the backtrace with null pointers
-// when the pseudo stack height is less than the capacity.
 TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
-  StackFrame c = kCupcake;
-  StackFrame d = kDonut;
-  StackFrame e = kEclair;
-  StackFrame f = kFroyo;
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   {
     TRACE_EVENT0("Testing", kCupcake);
-    StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+    StackFrame frame_c[] = {t, c};
     AssertBacktraceEquals(frame_c);
 
     {
       TRACE_EVENT0("Testing", kDonut);
-      StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+      StackFrame frame_cd[] = {t, c, d};
       AssertBacktraceEquals(frame_cd);
     }
 
@@ -92,38 +102,39 @@
 
     {
       TRACE_EVENT0("Testing", kEclair);
-      StackFrame frame_ce[] = {c, e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+      StackFrame frame_ce[] = {t, c, e};
       AssertBacktraceEquals(frame_ce);
     }
 
     AssertBacktraceEquals(frame_c);
   }
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   {
     TRACE_EVENT0("Testing", kFroyo);
-    StackFrame frame_f[] = {f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+    StackFrame frame_f[] = {t, f};
     AssertBacktraceEquals(frame_f);
   }
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 }
 
 // Same as |PseudoStackScopedTrace|, but now test the |TRACE_EVENT_BEGIN| and
 // |TRACE_EVENT_END| macros.
 TEST_F(AllocationContextTrackerTest, PseudoStackBeginEndTrace) {
-  StackFrame c = kCupcake;
-  StackFrame d = kDonut;
-  StackFrame e = kEclair;
-  StackFrame f = kFroyo;
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
 
-  StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_ce[] = {c, e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_f[] = {f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+  StackFrame frame_c[] = {t, c};
+  StackFrame frame_cd[] = {t, c, d};
+  StackFrame frame_ce[] = {t, c, e};
+  StackFrame frame_f[] = {t, f};
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   TRACE_EVENT_BEGIN0("Testing", kCupcake);
   AssertBacktraceEquals(frame_c);
@@ -141,27 +152,28 @@
   AssertBacktraceEquals(frame_c);
   TRACE_EVENT_END0("Testing", kCupcake);
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   TRACE_EVENT_BEGIN0("Testing", kFroyo);
   AssertBacktraceEquals(frame_f);
   TRACE_EVENT_END0("Testing", kFroyo);
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 }
 
 TEST_F(AllocationContextTrackerTest, PseudoStackMixedTrace) {
-  StackFrame c = kCupcake;
-  StackFrame d = kDonut;
-  StackFrame e = kEclair;
-  StackFrame f = kFroyo;
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
 
-  StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_e[] = {e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_ef[] = {e, f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+  StackFrame frame_c[] = {t, c};
+  StackFrame frame_cd[] = {t, c, d};
+  StackFrame frame_e[] = {t, e};
+  StackFrame frame_ef[] = {t, e, f};
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   TRACE_EVENT_BEGIN0("Testing", kCupcake);
   AssertBacktraceEquals(frame_c);
@@ -173,7 +185,7 @@
 
   AssertBacktraceEquals(frame_c);
   TRACE_EVENT_END0("Testing", kCupcake);
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   {
     TRACE_EVENT0("Testing", kEclair);
@@ -185,12 +197,15 @@
     AssertBacktraceEquals(frame_e);
   }
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 }
 
 TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
-  // Push 12 events onto the pseudo stack.
-  TRACE_EVENT0("Testing", kCupcake);
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+  // Push 11 events onto the pseudo stack.
   TRACE_EVENT0("Testing", kCupcake);
   TRACE_EVENT0("Testing", kCupcake);
   TRACE_EVENT0("Testing", kCupcake);
@@ -207,19 +222,64 @@
 
   {
     TRACE_EVENT0("Testing", kGingerbread);
-    AllocationContext ctx = AllocationContextTracker::GetContextSnapshot();
+    AllocationContext ctx =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
 
     // The pseudo stack relies on pointer equality, not deep string comparisons.
-    ASSERT_EQ(kCupcake, ctx.backtrace.frames[0]);
-    ASSERT_EQ(kFroyo, ctx.backtrace.frames[11]);
+    ASSERT_EQ(t, ctx.backtrace.frames[0]);
+    ASSERT_EQ(c, ctx.backtrace.frames[1]);
+    ASSERT_EQ(f, ctx.backtrace.frames[11]);
   }
 
   {
-    AllocationContext ctx = AllocationContextTracker::GetContextSnapshot();
-    ASSERT_EQ(kCupcake, ctx.backtrace.frames[0]);
-    ASSERT_EQ(kFroyo, ctx.backtrace.frames[11]);
+    AllocationContext ctx =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
+    ASSERT_EQ(t, ctx.backtrace.frames[0]);
+    ASSERT_EQ(c, ctx.backtrace.frames[1]);
+    ASSERT_EQ(f, ctx.backtrace.frames[11]);
   }
 }
 
+TEST_F(AllocationContextTrackerTest, TrackTaskContext) {
+  const char kContext1[] = "context1";
+  const char kContext2[] = "context2";
+  {
+    // The context from the scoped task event should be used as type name.
+    TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext1);
+    AllocationContext ctx1 =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
+    ASSERT_EQ(kContext1, ctx1.type_name);
+
+    // In case of nested events, the last event's context should be used.
+    TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event2(kContext2);
+    AllocationContext ctx2 =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
+    ASSERT_EQ(kContext2, ctx2.type_name);
+  }
+
+  // Type should be nullptr without task event.
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
+  ASSERT_FALSE(ctx.type_name);
+}
+
+TEST_F(AllocationContextTrackerTest, IgnoreAllocationTest) {
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kDonut);
+  HEAP_PROFILER_SCOPED_IGNORE;
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
+  const StringPiece kTracingOverhead("tracing_overhead");
+  ASSERT_EQ(kTracingOverhead,
+            static_cast<const char*>(ctx.backtrace.frames[0].value));
+  ASSERT_EQ(1u, ctx.backtrace.frame_count);
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
new file mode 100644
index 0000000..2c2cd37
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -0,0 +1,180 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_register.h"
+
+#include <algorithm>
+
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+namespace base {
+namespace trace_event {
+
+AllocationRegister::ConstIterator::ConstIterator(
+    const AllocationRegister& alloc_register, AllocationIndex index)
+    : register_(alloc_register),
+      index_(index) {}
+
+// Advances to the next in-use allocation cell. |Next| skips free cells and
+// returns kInvalidKVIndex past the last one, which makes the iterator compare
+// equal to |end()|.
+void AllocationRegister::ConstIterator::operator++() {
+  index_ = register_.allocations_.Next(index_ + 1);
+}
+
+bool AllocationRegister::ConstIterator::operator!=(
+    const ConstIterator& other) const {
+  return index_ != other.index_;
+}
+
+// Materializes the (address, size, context) view for the current cell.
+AllocationRegister::Allocation
+AllocationRegister::ConstIterator::operator*() const {
+  return register_.GetAllocation(index_);
+}
+
+// Hashes a backtrace by sampling at most |kSampleLength| frames from each end
+// of the stack; sampling bounds the work for very deep stacks.
+size_t AllocationRegister::BacktraceHasher::operator () (
+    const Backtrace& backtrace) const {
+  const size_t kSampleLength = 10;
+
+  uintptr_t total_value = 0;
+
+  // Sum the first (up to) |kSampleLength| frame pointers.
+  size_t head_end = std::min(backtrace.frame_count, kSampleLength);
+  for (size_t i = 0; i != head_end; ++i) {
+    total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+  }
+
+  // Sum the last (up to) |kSampleLength| frames that were not already counted
+  // by the head loop above.
+  size_t tail_start = backtrace.frame_count -
+      std::min(backtrace.frame_count - head_end, kSampleLength);
+  for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
+    total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
+  }
+
+  // Mix in the depth so stacks with the same sampled frames but different
+  // heights hash differently.
+  total_value += backtrace.frame_count;
+
+  // These magic constants give best results in terms of average collisions
+  // per backtrace. They were found by replaying real backtraces from Linux
+  // and Android against different hash functions.
+  return (total_value * 131101) >> 14;
+}
+
+// Hashes an allocation address for bucket selection in the allocation map.
+size_t AllocationRegister::AddressHasher::operator () (
+    const void* address) const {
+  // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
+  // been chosen carefully based on measurements with real-word data (addresses
+  // recorded from a Chrome trace run). It is the first prime after 2^17. For
+  // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
+  // buckets. Microbenchmarks show that this simple scheme outperforms fancy
+  // hashes like Murmur3 by 20 to 40 percent.
+  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
+  const uintptr_t a = 131101;
+  const uintptr_t shift = 14;
+  const uintptr_t h = (key * a) >> shift;
+  return h;
+}
+
+// Default constructor: delegates to the sized constructor with the
+// compile-time default capacities declared in the header.
+AllocationRegister::AllocationRegister()
+    : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}
+
+// Capacities are fixed for the lifetime of the register; both maps reserve
+// all of their storage up front (see FixedHashMap, which mmaps once in its
+// constructor).
+AllocationRegister::AllocationRegister(size_t allocation_capacity,
+                                       size_t backtrace_capacity)
+    : allocations_(allocation_capacity),
+      backtraces_(backtrace_capacity) {}
+
+AllocationRegister::~AllocationRegister() {
+}
+
+void AllocationRegister::Insert(const void* address,
+                                size_t size,
+                                const AllocationContext& context) {
+  DCHECK(address != nullptr);
+  // Zero-byte allocations are not tracked.
+  if (size == 0) {
+    return;
+  }
+
+  // Register (or find) the backtrace first; the allocation entry stores an
+  // index into the backtrace map rather than the backtrace itself.
+  AllocationInfo info = {
+      size,
+      context.type_name,
+      InsertBacktrace(context.backtrace)
+  };
+
+  // Try to insert the allocation.
+  auto index_and_flag = allocations_.Insert(address, info);
+  if (!index_and_flag.second) {
+    // |address| is already there - overwrite the allocation info. Drop the
+    // reference to the old entry's backtrace before it is overwritten.
+    auto& old_info = allocations_.Get(index_and_flag.first).second;
+    RemoveBacktrace(old_info.backtrace_index);
+    old_info = info;
+  }
+}
+
+// Removes |address| if it is tracked; does nothing for addresses that were
+// never inserted. Also releases one reference to the entry's backtrace.
+void AllocationRegister::Remove(const void* address) {
+  auto index = allocations_.Find(address);
+  if (index == AllocationMap::kInvalidKVIndex) {
+    return;
+  }
+
+  const AllocationInfo& info = allocations_.Get(index).second;
+  RemoveBacktrace(info.backtrace_index);
+  allocations_.Remove(index);
+}
+
+// Looks up |address|; on success fills |out_allocation| (when non-null) and
+// returns true. Returns false when the address is not tracked.
+bool AllocationRegister::Get(const void* address,
+                             Allocation* out_allocation) const {
+  auto index = allocations_.Find(address);
+  if (index == AllocationMap::kInvalidKVIndex) {
+    return false;
+  }
+
+  if (out_allocation) {
+    *out_allocation = GetAllocation(index);
+  }
+  return true;
+}
+
+// |Next(0)| finds the first in-use cell; an empty register yields
+// kInvalidKVIndex, which makes begin() compare equal to end().
+AllocationRegister::ConstIterator AllocationRegister::begin() const {
+  return ConstIterator(*this, allocations_.Next(0));
+}
+
+AllocationRegister::ConstIterator AllocationRegister::end() const {
+  return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
+}
+
+// Reports this register's own memory cost to the tracing infrastructure.
+// |allocated| counts only the object itself; |resident| additionally counts
+// the used portion of both mmapped hash maps (see EstimateUsedMemory).
+void AllocationRegister::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) const {
+  size_t allocated = sizeof(AllocationRegister);
+  size_t resident = sizeof(AllocationRegister)
+                    + allocations_.EstimateUsedMemory()
+                    + backtraces_.EstimateUsedMemory();
+  overhead->Add("AllocationRegister", allocated, resident);
+}
+
+// Backtraces are deduplicated and reference-counted: the map's value is the
+// number of live allocations that reference the backtrace (the key).
+AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
+    const Backtrace& backtrace) {
+  // Insert with a count of 0 (a no-op if already present), then increment.
+  auto index = backtraces_.Insert(backtrace, 0).first;
+  auto& backtrace_and_count = backtraces_.Get(index);
+  backtrace_and_count.second++;
+  return index;
+}
+
+// Drops one reference to the backtrace at |index|.
+void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
+  auto& backtrace_and_count = backtraces_.Get(index);
+  if (--backtrace_and_count.second == 0) {
+    // Backtrace is not referenced anymore - remove it.
+    backtraces_.Remove(index);
+  }
+}
+
+// Builds the public Allocation view for the entry at |index|, resolving the
+// stored backtrace index back into the actual Backtrace.
+AllocationRegister::Allocation AllocationRegister::GetAllocation(
+    AllocationMap::KVIndex index) const {
+  const auto& address_and_info = allocations_.Get(index);
+  const auto& backtrace_and_count = backtraces_.Get(
+      address_and_info.second.backtrace_index);
+  return {
+      address_and_info.first,
+      address_and_info.second.size,
+      AllocationContext(
+          backtrace_and_count.first,
+          address_and_info.second.type_name)
+  };
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
new file mode 100644
index 0000000..86e2721
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -0,0 +1,356 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/template_util.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+class AllocationRegisterTest;
+
+namespace internal {
+
+// Allocates a region of virtual address space of |size| rounded up to the
+// system page size. The memory is zeroed by the system. A guard page is
+// added after the end.
+void* AllocateGuardedVirtualMemory(size_t size);
+
+// Frees a region of virtual address space allocated by a call to
+// |AllocateGuardedVirtualMemory|.
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size);
+
+// Hash map that mmaps memory only once in the constructor. Its API is
+// similar to std::unordered_map, except that an integer index (KVIndex) is
+// used to address entries instead of iterators.
+template <size_t NumBuckets, class Key, class Value, class KeyHasher>
+class FixedHashMap {
+  // To keep things simple we don't call destructors.
+  static_assert(is_trivially_destructible<Key>::value &&
+                    is_trivially_destructible<Value>::value,
+                "Key and Value shouldn't have destructors");
+ public:
+  using KVPair = std::pair<const Key, Value>;
+
+  // For implementation simplicity API uses integer index instead
+  // of iterators. Most operations (except FindValidIndex) on KVIndex
+  // are O(1).
+  using KVIndex = size_t;
+  static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+
+  // Capacity controls how many items this hash map can hold, and largely
+  // affects memory footprint.
+  FixedHashMap(size_t capacity)
+    : num_cells_(capacity),
+      cells_(static_cast<Cell*>(
+          AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+      buckets_(static_cast<Bucket*>(
+          AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+      free_list_(nullptr),
+      next_unused_cell_(0) {}
+
+  ~FixedHashMap() {
+    FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
+    FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
+  }
+
+  // Inserts |value| under |key|. Returns {index, true} when a new entry was
+  // created, or {index of the existing entry, false} when |key| was already
+  // present; in the latter case the stored value is left untouched.
+  std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
+    Cell** p_cell = Lookup(key);
+    Cell* cell = *p_cell;
+    if (cell) {
+      return {static_cast<KVIndex>(cell - cells_), false};  // not inserted
+    }
+
+    // Get a free cell and link it.
+    *p_cell = cell = GetFreeCell();
+    cell->p_prev = p_cell;
+    cell->next = nullptr;
+
+    // Initialize key/value pair. Since key is 'const Key' this is the
+    // only way to initialize it.
+    new (&cell->kv) KVPair(key, value);
+
+    return {static_cast<KVIndex>(cell - cells_), true};  // inserted
+  }
+
+  // Unlinks the entry at |index| from its bucket chain and recycles its cell
+  // through the free list. |index| must refer to a live entry.
+  void Remove(KVIndex index) {
+    DCHECK_LT(index, next_unused_cell_);
+
+    Cell* cell = &cells_[index];
+
+    // Unlink the cell.
+    *cell->p_prev = cell->next;
+    if (cell->next) {
+      cell->next->p_prev = cell->p_prev;
+    }
+    cell->p_prev = nullptr;  // mark as free
+
+    // Add it to the free list.
+    cell->next = free_list_;
+    free_list_ = cell;
+  }
+
+  // Returns the index of |key|'s entry, or kInvalidKVIndex when absent.
+  KVIndex Find(const Key& key) const {
+    Cell* cell = *Lookup(key);
+    return cell ? static_cast<KVIndex>(cell - cells_) : kInvalidKVIndex;
+  }
+
+  KVPair& Get(KVIndex index) {
+    return cells_[index].kv;
+  }
+
+  const KVPair& Get(KVIndex index) const {
+    return cells_[index].kv;
+  }
+
+  // Finds next index that has a KVPair associated with it. Search starts
+  // with the specified index. Returns kInvalidKVIndex if nothing was found.
+  // To find the first valid index, call this function with 0. Continue
+  // calling with the last_index + 1 until kInvalidKVIndex is returned.
+  KVIndex Next(KVIndex index) const {
+    for (;index < next_unused_cell_; ++index) {
+      if (cells_[index].p_prev) {
+        return index;
+      }
+    }
+    return kInvalidKVIndex;
+  }
+
+  // Estimates number of bytes used in allocated memory regions.
+  size_t EstimateUsedMemory() const {
+    size_t page_size = base::GetPageSize();
+    // |next_unused_cell_| is the first cell that wasn't touched, i.e.
+    // it's the number of touched cells.
+    return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
+           bits::Align(sizeof(Bucket) * NumBuckets, page_size);
+  }
+
+ private:
+  friend base::trace_event::AllocationRegisterTest;
+
+  struct Cell {
+    KVPair kv;
+    Cell* next;
+
+    // Conceptually this is |prev| in a doubly linked list. However, buckets
+    // also participate in the bucket's cell list - they point to the list's
+    // head and also need to be linked / unlinked properly. To treat these two
+    // cases uniformly, instead of |prev| we're storing "pointer to a Cell*
+    // that points to this Cell" kind of thing. So |p_prev| points to a bucket
+    // for the first cell in a list, and points to |next| of the previous cell
+    // for any other cell. With that Lookup() is the only function that handles
+    // buckets / cells differently.
+    // If |p_prev| is nullptr, the cell is in the free list.
+    Cell** p_prev;
+  };
+
+  using Bucket = Cell*;
+
+  // Returns a pointer to the cell that contains or should contain the entry
+  // for |key|. The pointer may point at an element of |buckets_| or at the
+  // |next| member of an element of |cells_|.
+  Cell** Lookup(const Key& key) const {
+    // The list head is in |buckets_| at the hash offset.
+    Cell** p_cell = &buckets_[Hash(key)];
+
+    // Chase down the list until the cell that holds |key| is found,
+    // or until the list ends.
+    while (*p_cell && (*p_cell)->kv.first != key) {
+      p_cell = &(*p_cell)->next;
+    }
+
+    return p_cell;
+  }
+
+  // Returns a cell that is not being used to store an entry (either by
+  // recycling from the free list or by taking a fresh cell).
+  Cell* GetFreeCell() {
+    // First try to re-use a cell from the free list.
+    if (free_list_) {
+      Cell* cell = free_list_;
+      free_list_ = cell->next;
+      return cell;
+    }
+
+    // Otherwise pick the next cell that has not been touched before.
+    size_t idx = next_unused_cell_;
+    next_unused_cell_++;
+
+    // If the hash table has too little capacity (when too little address space
+    // was reserved for |cells_|), |next_unused_cell_| can be an index outside
+    // of the allocated storage. A guard page is allocated there to crash the
+    // program in that case. There are alternative solutions:
+    // - Deal with it, increase capacity by reallocating |cells_|.
+    // - Refuse to insert and let the caller deal with it.
+    // Because free cells are re-used before accessing fresh cells with a higher
+    // index, and because reserving address space without touching it is cheap,
+    // the simplest solution is to just allocate a humongous chunk of address
+    // space.
+
+    DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+
+    return &cells_[idx];
+  }
+
+  // Returns a value in the range [0, NumBuckets - 1] (inclusive).
+  size_t Hash(const Key& key) const {
+    if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) {
+      // NumBuckets is a power of 2.
+      return KeyHasher()(key) & (NumBuckets - 1);
+    } else {
+      return KeyHasher()(key) % NumBuckets;
+    }
+  }
+
+  // Number of cells.
+  size_t const num_cells_;
+
+  // The array of cells. This array is backed by mmapped memory. Lower indices
+  // are accessed first, higher indices are accessed only when the |free_list_|
+  // is empty. This is to minimize the amount of resident memory used.
+  Cell* const cells_;
+
+  // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
+  // contain the pointer to the linked list of cells for |Hash(key)|.
+  // This array is backed by mmapped memory.
+  mutable Bucket* buckets_;
+
+  // The head of the free list.
+  Cell* free_list_;
+
+  // The index of the first element of |cells_| that has not been used before.
+  // If the free list is empty and a new cell is needed, the cell at this index
+  // is used. This is the high water mark for the number of entries stored.
+  size_t next_unused_cell_;
+
+  DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
+};
+
+}  // namespace internal
+
+class TraceEventMemoryOverhead;
+
+// The allocation register keeps track of all allocations that have not been
+// freed. Internally it has two hashtables: one for Backtraces and one for
+// actual allocations. Sizes of both hashtables are fixed, and this class
+// allocates (mmaps) only in its constructor.
+class BASE_EXPORT AllocationRegister {
+ public:
+  // Details about an allocation.
+  struct Allocation {
+    const void* address;  // Address of the allocation (Insert DCHECKs non-null).
+    size_t size;  // Size of the allocation in bytes.
+    AllocationContext context;  // Backtrace and type name captured at Insert.
+  };
+
+  // An iterator that iterates entries in no particular order.
+  class BASE_EXPORT ConstIterator {
+   public:
+    void operator++();
+    bool operator!=(const ConstIterator& other) const;
+    Allocation operator*() const;
+
+   private:
+    friend class AllocationRegister;
+    using AllocationIndex = size_t;
+
+    ConstIterator(const AllocationRegister& alloc_register,
+                  AllocationIndex index);
+
+    const AllocationRegister& register_;
+    AllocationIndex index_;
+  };
+
+  // Constructs a register with the default capacities declared below.
+  AllocationRegister();
+  // Constructs a register with explicit capacities (also reachable from the
+  // friend AllocationRegisterTest below).
+  AllocationRegister(size_t allocation_capacity, size_t backtrace_capacity);
+
+  ~AllocationRegister();
+
+  // Inserts allocation details into the table. If the address was present
+  // already, its details are updated. |address| must not be null.
+  void Insert(const void* address,
+              size_t size,
+              const AllocationContext& context);
+
+  // Removes the address from the table if it is present. It is ok to call this
+  // with a null pointer.
+  void Remove(const void* address);
+
+  // Finds allocation for the address and fills |out_allocation|.
+  bool Get(const void* address, Allocation* out_allocation) const;
+
+  ConstIterator begin() const;
+  ConstIterator end() const;
+
+  // Estimates memory overhead including |sizeof(AllocationRegister)|.
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
+
+ private:
+  friend AllocationRegisterTest;
+
+  // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
+  // hashing and should be changed together with AddressHasher.
+  static const size_t kAllocationBuckets = 1 << 18;
+  static const size_t kAllocationCapacity = 1500000;
+
+  // Expect max 2^15 unique backtraces. Can be changed to 2^16 without
+  // needing to tweak BacktraceHasher implementation.
+  static const size_t kBacktraceBuckets = 1 << 15;
+  static const size_t kBacktraceCapacity = kBacktraceBuckets;
+
+  struct BacktraceHasher {
+    size_t operator () (const Backtrace& backtrace) const;
+  };
+
+  using BacktraceMap = internal::FixedHashMap<
+      kBacktraceBuckets,
+      Backtrace,
+      size_t, // Number of references to the backtrace (the key). Incremented
+              // when an allocation that references the backtrace is inserted,
+              // and decremented when the allocation is removed. When the
+              // number drops to zero, the backtrace is removed from the map.
+      BacktraceHasher>;
+
+  // Per-allocation data stored in |allocations_|. The backtrace itself lives
+  // in |backtraces_| (deduplicated) and is referenced here by index.
+  struct AllocationInfo {
+    size_t size;
+    const char* type_name;
+    BacktraceMap::KVIndex backtrace_index;
+  };
+
+  struct AddressHasher {
+    size_t operator () (const void* address) const;
+  };
+
+  using AllocationMap = internal::FixedHashMap<
+      kAllocationBuckets,
+      const void*,
+      AllocationInfo,
+      AddressHasher>;
+
+  // Adds one reference to |backtrace| (inserting it when new) and returns its
+  // index; RemoveBacktrace drops a reference and erases the entry on zero.
+  BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
+  void RemoveBacktrace(BacktraceMap::KVIndex index);
+
+  // Builds the public Allocation view for the map entry at the given index.
+  Allocation GetAllocation(AllocationMap::KVIndex) const;
+
+  AllocationMap allocations_;
+  BacktraceMap backtraces_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
diff --git a/base/trace_event/heap_profiler_allocation_register_posix.cc b/base/trace_event/heap_profiler_allocation_register_posix.cc
new file mode 100644
index 0000000..94eeb4d
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_register_posix.cc
@@ -0,0 +1,58 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_register.h"
+
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/process/process_metrics.h"
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+namespace base {
+namespace trace_event {
+namespace internal {
+
+namespace {
+// The guard region is one inaccessible page placed after the usable mapping
+// so that out-of-bounds accesses fault instead of corrupting memory.
+size_t GetGuardSize() {
+  return GetPageSize();
+}
+}
+
+void* AllocateGuardedVirtualMemory(size_t size) {
+  size = bits::Align(size, GetPageSize());
+
+  // Add space for a guard page at the end.
+  size_t map_size = size + GetGuardSize();
+
+  // MAP_ANONYMOUS gives zero-filled pages not backed by any file.
+  void* addr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
+                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+  PCHECK(addr != MAP_FAILED);
+
+  // Mark the last page of the allocated address space as inaccessible
+  // (PROT_NONE). The read/write accessible space is still at least |size|
+  // bytes (the page-aligned request).
+  void* guard_addr =
+      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + size);
+  int result = mprotect(guard_addr, GetGuardSize(), PROT_NONE);
+  PCHECK(result == 0);
+
+  return addr;
+}
+
+// |allocated_size| must be the size originally passed to
+// AllocateGuardedVirtualMemory; the guard page is unmapped together with the
+// usable region.
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
+  size_t size = bits::Align(allocated_size, GetPageSize()) + GetGuardSize();
+  munmap(address, size);
+}
+
+}  // namespace internal
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
new file mode 100644
index 0000000..1bf06db
--- /dev/null
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -0,0 +1,323 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <iterator>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_log.h"
+
+// Most of what the |HeapDumpWriter| does is aggregating detailed information
+// about the heap and deciding what to dump. The input to this process is a list
+// of |AllocationContext|s and size pairs.
+//
+// The pairs are grouped into |Bucket|s. A bucket is a group of (context, size)
+// pairs where the properties of the contexts share a prefix. (Type name is
+// considered a list of length one here.) First all pairs are put into one
+// bucket that represents the entire heap. Then this bucket is recursively
+// broken down into smaller buckets. Each bucket keeps track of whether further
+// breakdown is possible.
+
+namespace base {
+namespace trace_event {
+namespace internal {
+namespace {
+
+// Denotes a property of |AllocationContext| to break down by.
+enum class BreakDownMode { kByBacktrace, kByTypeName };
+
+// A group of bytes for which the context shares a prefix.
+struct Bucket {
+  Bucket()
+      : size(0),
+        count(0),
+        backtrace_cursor(0),
+        is_broken_down_by_type_name(false) {}
+
+  std::vector<std::pair<const AllocationContext*, AllocationMetrics>>
+      metrics_by_context;
+
+  // The sum of the sizes of |metrics_by_context|.
+  size_t size;
+
+  // The sum of number of allocations of |metrics_by_context|.
+  size_t count;
+
+  // The index of the stack frame that has not yet been broken down by. For all
+  // elements in this bucket, the stack frames 0 up to (but not including) the
+  // cursor, must be equal.
+  size_t backtrace_cursor;
+
+  // When true, the type name for all elements in this bucket must be equal.
+  bool is_broken_down_by_type_name;
+};
+
+// Comparison operator to order buckets by their size.
+bool operator<(const Bucket& lhs, const Bucket& rhs) {
+  return lhs.size < rhs.size;
+}
+
+// Groups the allocations in the bucket by |break_by|. The buckets in the
+// returned list will have |backtrace_cursor| advanced or
+// |is_broken_down_by_type_name| set depending on the property to group by.
+std::vector<Bucket> GetSubbuckets(const Bucket& bucket,
+                                  BreakDownMode break_by) {
+  base::hash_map<const void*, Bucket> breakdown;
+
+
+  if (break_by == BreakDownMode::kByBacktrace) {
+    for (const auto& context_and_metrics : bucket.metrics_by_context) {
+      const Backtrace& backtrace = context_and_metrics.first->backtrace;
+      const StackFrame* begin = std::begin(backtrace.frames);
+      const StackFrame* end = begin + backtrace.frame_count;
+      const StackFrame* cursor = begin + bucket.backtrace_cursor;
+
+      DCHECK_LE(cursor, end);
+
+      if (cursor != end) {
+        Bucket& subbucket = breakdown[cursor->value];
+        subbucket.size += context_and_metrics.second.size;
+        subbucket.count += context_and_metrics.second.count;
+        subbucket.metrics_by_context.push_back(context_and_metrics);
+        subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
+        subbucket.is_broken_down_by_type_name =
+            bucket.is_broken_down_by_type_name;
+        DCHECK_GT(subbucket.size, 0u);
+        DCHECK_GT(subbucket.count, 0u);
+      }
+    }
+  } else if (break_by == BreakDownMode::kByTypeName) {
+    if (!bucket.is_broken_down_by_type_name) {
+      for (const auto& context_and_metrics : bucket.metrics_by_context) {
+        const AllocationContext* context = context_and_metrics.first;
+        Bucket& subbucket = breakdown[context->type_name];
+        subbucket.size += context_and_metrics.second.size;
+        subbucket.count += context_and_metrics.second.count;
+        subbucket.metrics_by_context.push_back(context_and_metrics);
+        subbucket.backtrace_cursor = bucket.backtrace_cursor;
+        subbucket.is_broken_down_by_type_name = true;
+        DCHECK_GT(subbucket.size, 0u);
+        DCHECK_GT(subbucket.count, 0u);
+      }
+    }
+  }
+
+  std::vector<Bucket> buckets;
+  buckets.reserve(breakdown.size());
+  for (auto key_bucket : breakdown)
+    buckets.push_back(key_bucket.second);
+
+  return buckets;
+}
+
+// Breaks down the bucket by |break_by|. Returns only buckets that contribute
+// more than |min_size_bytes| to the total size. The long tail is omitted.
+std::vector<Bucket> BreakDownBy(const Bucket& bucket,
+                                BreakDownMode break_by,
+                                size_t min_size_bytes) {
+  std::vector<Bucket> buckets = GetSubbuckets(bucket, break_by);
+
+  // Ensure that |buckets| is a max-heap (the data structure, not memory heap),
+  // so its front contains the largest bucket. Buckets should be iterated
+  // ordered by size, but sorting the vector is overkill because the long tail
+  // of small buckets will be discarded. By using a max-heap, the optimal case
+  // where all but the first bucket are discarded is O(n). The worst case where
+  // no bucket is discarded is doing a heap sort, which is O(n log n).
+  std::make_heap(buckets.begin(), buckets.end());
+
+  // Keep including buckets until adding one would increase the number of
+  // bytes accounted for by |min_size_bytes|. The large buckets end up in
+  // [it, end()), [begin(), it) is the part that contains the max-heap
+  // of small buckets.
+  std::vector<Bucket>::iterator it;
+  for (it = buckets.end(); it != buckets.begin(); --it) {
+    if (buckets.front().size < min_size_bytes)
+      break;
+
+    // Put the largest bucket in [begin, it) at |it - 1| and max-heapify
+    // [begin, it - 1). This puts the next largest bucket at |buckets.front()|.
+    std::pop_heap(buckets.begin(), it);
+  }
+
+  // At this point, |buckets| looks like this (numbers are bucket sizes):
+  //
+  // <-- max-heap of small buckets --->
+  //                                  <-- large buckets by ascending size -->
+  // [ 19 | 11 | 13 | 7 | 2 | 5 | ... | 83 | 89 | 97 ]
+  //   ^                                ^              ^
+  //   |                                |              |
+  //   begin()                          it             end()
+
+  // Discard the long tail of buckets that contribute less than a percent.
+  buckets.erase(buckets.begin(), it);
+
+  return buckets;
+}
+
+}  // namespace
+
+bool operator<(Entry lhs, Entry rhs) {
+  // There is no need to compare |size|. If the backtrace and type name are
+  // equal then the sizes must be equal as well.
+  return std::tie(lhs.stack_frame_id, lhs.type_id) <
+         std::tie(rhs.stack_frame_id, rhs.type_id);
+}
+
+HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
+                               TypeNameDeduplicator* type_name_deduplicator,
+                               uint32_t breakdown_threshold_bytes)
+    : stack_frame_deduplicator_(stack_frame_deduplicator),
+      type_name_deduplicator_(type_name_deduplicator),
+      breakdown_threshold_bytes_(breakdown_threshold_bytes) {
+}
+
+HeapDumpWriter::~HeapDumpWriter() {}
+
+bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
+  // The contexts in the bucket are all different, but the [begin, cursor) range
+  // is equal for all contexts in the bucket, and the type names are the same if
+  // |is_broken_down_by_type_name| is set.
+  DCHECK(!bucket.metrics_by_context.empty());
+
+  const AllocationContext* context = bucket.metrics_by_context.front().first;
+
+  const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
+  const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
+  DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
+
+  Entry entry;
+  entry.stack_frame_id = stack_frame_deduplicator_->Insert(
+      backtrace_begin, backtrace_end);
+
+  // Deduplicate the type name, or use ID -1 if type name is not set.
+  entry.type_id = bucket.is_broken_down_by_type_name
+                      ? type_name_deduplicator_->Insert(context->type_name)
+                      : -1;
+
+  entry.size = bucket.size;
+  entry.count = bucket.count;
+
+  auto position_and_inserted = entries_.insert(entry);
+  return position_and_inserted.second;
+}
+
+void HeapDumpWriter::BreakDown(const Bucket& bucket) {
+  auto by_backtrace = BreakDownBy(bucket,
+                                  BreakDownMode::kByBacktrace,
+                                  breakdown_threshold_bytes_);
+  auto by_type_name = BreakDownBy(bucket,
+                                  BreakDownMode::kByTypeName,
+                                  breakdown_threshold_bytes_);
+
+  // Insert entries for the buckets. If a bucket was not present before, it has
+  // not been broken down before, so recursively continue breaking down in that
+  // case. There might be multiple routes to the same entry (first break down
+  // by type name, then by backtrace, or first by backtrace and then by type),
+  // so a set is used to avoid dumping and breaking down entries more than once.
+
+  for (const Bucket& subbucket : by_backtrace)
+    if (AddEntryForBucket(subbucket))
+      BreakDown(subbucket);
+
+  for (const Bucket& subbucket : by_type_name)
+    if (AddEntryForBucket(subbucket))
+      BreakDown(subbucket);
+}
+
+const std::set<Entry>& HeapDumpWriter::Summarize(
+    const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context) {
+  // Start with one bucket that represents the entire heap. Iterate by
+  // reference, because the allocation contexts are going to point to allocation
+  // contexts stored in |metrics_by_context|.
+  Bucket root_bucket;
+  for (const auto& context_and_metrics : metrics_by_context) {
+    DCHECK_GT(context_and_metrics.second.size, 0u);
+    DCHECK_GT(context_and_metrics.second.count, 0u);
+    const AllocationContext* context = &context_and_metrics.first;
+    root_bucket.metrics_by_context.push_back(
+        std::make_pair(context, context_and_metrics.second));
+    root_bucket.size += context_and_metrics.second.size;
+    root_bucket.count += context_and_metrics.second.count;
+  }
+
+  AddEntryForBucket(root_bucket);
+
+  // Recursively break down the heap and fill |entries_| with entries to dump.
+  BreakDown(root_bucket);
+
+  return entries_;
+}
+
+std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
+  std::string buffer;
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+
+  traced_value->BeginArray("entries");
+
+  for (const Entry& entry : entries) {
+    traced_value->BeginDictionary();
+
+    // Format size as hexadecimal string into |buffer|.
+    SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size));
+    traced_value->SetString("size", buffer);
+
+    SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.count));
+    traced_value->SetString("count", buffer);
+
+    if (entry.stack_frame_id == -1) {
+      // An empty backtrace (which will have ID -1) is represented by the empty
+      // string, because there is no leaf frame to reference in |stackFrames|.
+      traced_value->SetString("bt", "");
+    } else {
+      // Format index of the leaf frame as a string, because |stackFrames| is a
+      // dictionary, not an array.
+      SStringPrintf(&buffer, "%i", entry.stack_frame_id);
+      traced_value->SetString("bt", buffer);
+    }
+
+    // Type ID -1 (cumulative size for all types) is represented by the absence
+    // of the "type" key in the dictionary.
+    if (entry.type_id != -1) {
+      // Format the type ID as a string.
+      SStringPrintf(&buffer, "%i", entry.type_id);
+      traced_value->SetString("type", buffer);
+    }
+
+    traced_value->EndDictionary();
+  }
+
+  traced_value->EndArray();  // "entries"
+  return traced_value;
+}
+
+}  // namespace internal
+
+std::unique_ptr<TracedValue> ExportHeapDump(
+    const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
+    const MemoryDumpSessionState& session_state) {
+  internal::HeapDumpWriter writer(
+      session_state.stack_frame_deduplicator(),
+      session_state.type_name_deduplicator(),
+      session_state.memory_dump_config().heap_profiler_options
+          .breakdown_threshold_bytes);
+  return Serialize(writer.Summarize(metrics_by_context));
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.h b/base/trace_event/heap_profiler_heap_dump_writer.h
new file mode 100644
index 0000000..6e9d29d
--- /dev/null
+++ b/base/trace_event/heap_profiler_heap_dump_writer.h
@@ -0,0 +1,113 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+class MemoryDumpSessionState;
+class StackFrameDeduplicator;
+class TracedValue;
+class TypeNameDeduplicator;
+
+// Aggregates |metrics_by_context|, recursively breaks down the heap, and
+// returns a traced value with an "entries" array that can be dumped in the
+// trace log, following the format described in https://goo.gl/KY7zVE. The
+// number of entries is kept reasonable because long tails are not included.
+BASE_EXPORT std::unique_ptr<TracedValue> ExportHeapDump(
+    const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
+    const MemoryDumpSessionState& session_state);
+
+namespace internal {
+
+namespace {
+struct Bucket;
+}
+
+// An entry in the "entries" array as described in https://goo.gl/KY7zVE.
+struct BASE_EXPORT Entry {
+  size_t size;
+  size_t count;
+
+  // References a backtrace in the stack frame deduplicator. -1 means empty
+  // backtrace (the root of the tree).
+  int stack_frame_id;
+
+  // References a type name in the type name deduplicator. -1 indicates that
+  // the size is the cumulative size for all types (the root of the tree).
+  int type_id;
+};
+
+// Comparison operator to enable putting |Entry| in a |std::set|.
+BASE_EXPORT bool operator<(Entry lhs, Entry rhs);
+
+// Serializes entries to an "entries" array in a traced value.
+BASE_EXPORT std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& dump);
+
+// Helper class to dump a snapshot of an |AllocationRegister| or other heap
+// bookkeeping structure into a |TracedValue|. This class is intended to be
+// used as a one-shot local instance on the stack.
+class BASE_EXPORT HeapDumpWriter {
+ public:
+  // The |stack_frame_deduplicator| and |type_name_deduplicator| are not owned.
+  // The heap dump writer assumes exclusive access to them during the lifetime
+  // of the dump writer. The heap dumps are broken down for allocations bigger
+  // than |breakdown_threshold_bytes|.
+  HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
+                 TypeNameDeduplicator* type_name_deduplicator,
+                 uint32_t breakdown_threshold_bytes);
+
+  ~HeapDumpWriter();
+
+  // Aggregates allocations to compute the total size of the heap, then breaks
+  // down the heap recursively. This produces the values that should be dumped
+  // in the "entries" array. The number of entries is kept reasonable because
+  // long tails are not included. Use |Serialize| to convert to a traced value.
+  const std::set<Entry>& Summarize(
+      const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context);
+
+ private:
+  // Inserts an |Entry| for |Bucket| into |entries_|. Returns false if the
+  // entry was present before, true if it was not.
+  bool AddEntryForBucket(const Bucket& bucket);
+
+  // Recursively breaks down a bucket into smaller buckets and adds entries for
+  // the buckets worth dumping to |entries_|.
+  void BreakDown(const Bucket& bucket);
+
+  // The collection of entries that is filled by |Summarize|.
+  std::set<Entry> entries_;
+
+  // Helper for generating the |stackFrames| dictionary. Not owned, must outlive
+  // this heap dump writer instance.
+  StackFrameDeduplicator* const stack_frame_deduplicator_;
+
+  // Helper for converting type names to IDs. Not owned, must outlive this heap
+  // dump writer instance.
+  TypeNameDeduplicator* const type_name_deduplicator_;
+
+  // Minimum size of an allocation for which an allocation bucket will be
+  // broken down with children.
+  uint32_t breakdown_threshold_bytes_;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapDumpWriter);
+};
+
+}  // namespace internal
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
index cf3d198..49a2350 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -4,6 +4,7 @@
 
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 
+#include <inttypes.h>
 #include <stddef.h>
 
 #include <string>
@@ -19,6 +20,7 @@
 StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
                                              int parent_frame_index)
     : frame(frame), parent_frame_index(parent_frame_index) {}
+StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
 StackFrameDeduplicator::FrameNode::~FrameNode() {}
 
 StackFrameDeduplicator::StackFrameDeduplicator() {}
@@ -30,7 +32,7 @@
   std::map<StackFrame, int>* nodes = &roots_;
 
   // Loop through the frames, early out when a frame is null.
-  for (const StackFrame* it = beginFrame; it != endFrame && *it; it++) {
+  for (const StackFrame* it = beginFrame; it != endFrame; it++) {
     StackFrame frame = *it;
 
     auto node = nodes->find(frame);
@@ -76,8 +78,26 @@
     SStringPrintf(&stringify_buffer, "\"%d\":", i);
     out->append(stringify_buffer);
 
-    scoped_refptr<TracedValue> frame_node_value = new TracedValue;
-    frame_node_value->SetString("name", frame_node->frame);
+    std::unique_ptr<TracedValue> frame_node_value(new TracedValue);
+    const StackFrame& frame = frame_node->frame;
+    switch (frame.type) {
+      case StackFrame::Type::TRACE_EVENT_NAME:
+        frame_node_value->SetString(
+            "name", static_cast<const char*>(frame.value));
+        break;
+      case StackFrame::Type::THREAD_NAME:
+        SStringPrintf(&stringify_buffer,
+                      "[Thread: %s]",
+                      static_cast<const char*>(frame.value));
+        frame_node_value->SetString("name", stringify_buffer);
+        break;
+      case StackFrame::Type::PROGRAM_COUNTER:
+        SStringPrintf(&stringify_buffer,
+                      "pc:%" PRIxPTR,
+                      reinterpret_cast<uintptr_t>(frame.value));
+        frame_node_value->SetString("name", stringify_buffer);
+        break;
+    }
     if (frame_node->parent_frame_index >= 0) {
       SStringPrintf(&stringify_buffer, "%d", frame_node->parent_frame_index);
       frame_node_value->SetString("parent", stringify_buffer);
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
index 60df1ba..4932534 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.h
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
@@ -31,6 +31,7 @@
   // A node in the call tree.
   struct FrameNode {
     FrameNode(StackFrame frame, int parent_frame_index);
+    FrameNode(const FrameNode& other);
     ~FrameNode();
 
     StackFrame frame;
@@ -46,6 +47,7 @@
   using ConstIterator = std::vector<FrameNode>::const_iterator;
 
   StackFrameDeduplicator();
+  ~StackFrameDeduplicator() override;
 
   // Inserts a backtrace where |beginFrame| is a pointer to the bottom frame
   // (e.g. main) and |endFrame| is a pointer past the top frame (most recently
@@ -65,8 +67,6 @@
   void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
 
  private:
-  ~StackFrameDeduplicator() override;
-
   std::map<StackFrame, int> roots_;
   std::vector<FrameNode> frames_;
 
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
index 433c633..2215ede 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
@@ -2,12 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+
 #include <iterator>
+#include <memory>
 
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
-#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -15,11 +16,11 @@
 
 // Define all strings once, because the deduplicator requires pointer equality,
 // and string interning is unreliable.
-const char kBrowserMain[] = "BrowserMain";
-const char kRendererMain[] = "RendererMain";
-const char kCreateWidget[] = "CreateWidget";
-const char kInitialize[] = "Initialize";
-const char kMalloc[] = "malloc";
+StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
+StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
+StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
+StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
+StackFrame kMalloc = StackFrame::FromTraceEventName("malloc");
 
 TEST(StackFrameDeduplicatorTest, SingleBacktrace) {
   StackFrame bt[] = {kBrowserMain, kCreateWidget, kMalloc};
@@ -30,7 +31,7 @@
   //   CreateWidget [1]
   //     malloc [2]
 
-  scoped_refptr<StackFrameDeduplicator> dedup = new StackFrameDeduplicator;
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
 
   auto iter = dedup->begin();
@@ -46,6 +47,35 @@
   ASSERT_EQ(iter + 3, dedup->end());
 }
 
+TEST(StackFrameDeduplicatorTest, SingleBacktraceWithNull) {
+  StackFrame null_frame = StackFrame::FromTraceEventName(nullptr);
+  StackFrame bt[] = {kBrowserMain, null_frame, kMalloc};
+
+  // Deduplicator doesn't care about what's inside StackFrames,
+  // and handles nullptr StackFrame values as any other.
+  //
+  // So the call tree should look like this (index in brackets).
+  //
+  // BrowserMain [0]
+  //   (null) [1]
+  //     malloc [2]
+
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
+
+  auto iter = dedup->begin();
+  ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+  ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+  ASSERT_EQ(null_frame, (iter + 1)->frame);
+  ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+  ASSERT_EQ(kMalloc, (iter + 2)->frame);
+  ASSERT_EQ(1, (iter + 2)->parent_frame_index);
+
+  ASSERT_EQ(iter + 3, dedup->end());
+}
+
 // Test that there can be different call trees (there can be multiple bottom
 // frames). Also verify that frames with the same name but a different caller
 // are represented as distinct nodes.
@@ -63,7 +93,7 @@
   // Note that there will be two instances of CreateWidget,
   // with different parents.
 
-  scoped_refptr<StackFrameDeduplicator> dedup = new StackFrameDeduplicator;
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
   ASSERT_EQ(3, dedup->Insert(std::begin(bt1), std::end(bt1)));
 
@@ -95,7 +125,7 @@
   //
   // Note that BrowserMain will be re-used.
 
-  scoped_refptr<StackFrameDeduplicator> dedup = new StackFrameDeduplicator;
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
   ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
 
@@ -118,17 +148,5 @@
   ASSERT_EQ(dedup->begin() + 3, dedup->end());
 }
 
-TEST(StackFrameDeduplicatorTest, NullPaddingIsRemoved) {
-  StackFrame bt0[] = {kBrowserMain, nullptr, nullptr, nullptr};
-
-  scoped_refptr<StackFrameDeduplicator> dedup = new StackFrameDeduplicator;
-
-  // There are four frames in the backtrace, but the null pointers should be
-  // skipped, so only one frame is inserted, which will have index 0.
-  ASSERT_EQ(4u, arraysize(bt0));
-  ASSERT_EQ(0, dedup->Insert(std::begin(bt0), std::end(bt0)));
-  ASSERT_EQ(dedup->begin() + 1, dedup->end());
-}
-
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.cc b/base/trace_event/heap_profiler_type_name_deduplicator.cc
index e7f57c8..055f86a 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator.cc
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -16,6 +16,33 @@
 namespace base {
 namespace trace_event {
 
+namespace {
+
+// Extract the directory name if |type_name| is a file name. Otherwise, return
+// |type_name|.
+StringPiece ExtractDirNameFromFileName(const char* type_name) {
+  StringPiece result(type_name);
+  size_t last_seperator = result.find_last_of("\\/");
+
+  // If |type_name| was not a file path, the separator will not be found, so
+  // the whole type name is returned.
+  if (last_seperator == StringPiece::npos)
+    return result;
+
+  // Remove the file name from the path.
+  result.remove_suffix(result.length() - last_seperator);
+
+  // Remove the parent directory references.
+  const char kParentDirectory[] = "..";
+  const size_t kParentDirectoryLength = 3; // '../' or '..\'.
+  while (result.starts_with(kParentDirectory)) {
+    result.remove_prefix(kParentDirectoryLength);
+  }
+  return result;
+}
+
+}  // namespace
+
 TypeNameDeduplicator::TypeNameDeduplicator() {
   // A null pointer has type ID 0 ("unknown type");
   type_ids_.insert(std::make_pair(nullptr, 0));
@@ -53,9 +80,13 @@
     // a dictionary.
     SStringPrintf(&buffer, ",\"%d\":", it->second);
 
+    // TODO(ssid): crbug.com/594803 the type name is misused for file name in
+    // some cases.
+    StringPiece type_info = ExtractDirNameFromFileName(it->first);
+
     // |EscapeJSONString| appends, it does not overwrite |buffer|.
     bool put_in_quotes = true;
-    EscapeJSONString(it->first, put_in_quotes, &buffer);
+    EscapeJSONString(type_info, put_in_quotes, &buffer);
     out->append(buffer);
   }
 
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.h b/base/trace_event/heap_profiler_type_name_deduplicator.h
index 317ea5e..2d26c73 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator.h
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.h
@@ -21,19 +21,18 @@
 class BASE_EXPORT TypeNameDeduplicator : public ConvertableToTraceFormat {
  public:
   TypeNameDeduplicator();
+  ~TypeNameDeduplicator() override;
 
   // Inserts a type name and returns its ID.
   int Insert(const char* type_name);
 
+  // Writes the type ID -> type name mapping to the trace log.
+  void AppendAsTraceFormat(std::string* out) const override;
+
   // Estimates memory overhead including |sizeof(TypeNameDeduplicator)|.
   void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
 
  private:
-  ~TypeNameDeduplicator() override;
-
-  // Writes the type ID -> type name mapping to the trace log.
-  void AppendAsTraceFormat(std::string* out) const override;
-
   // Map from type name to type ID.
   std::map<const char*, int> type_ids_;
 
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc b/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
index 82c8fb5..b2e681a 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
+++ b/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
@@ -2,11 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <memory>
 #include <string>
 
 #include "base/json/json_reader.h"
-#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -14,6 +13,8 @@
 namespace base {
 namespace trace_event {
 
+namespace {
+
 // Define all strings once, because the deduplicator requires pointer equality,
 // and string interning is unreliable.
 const char kInt[] = "int";
@@ -21,12 +22,44 @@
 const char kString[] = "string";
 const char kNeedsEscape[] = "\"quotes\"";
 
-scoped_ptr<Value> DumpAndReadBack(const ConvertableToTraceFormat& convertable) {
+#if defined(OS_POSIX)
+const char kTaskFileName[] = "../../base/trace_event/trace_log.cc";
+const char kTaskPath[] = "base/trace_event";
+#else
+const char kTaskFileName[] = "..\\..\\base\\memory\\memory_win.cc";
+const char kTaskPath[] = "base\\memory";
+#endif
+
+std::unique_ptr<Value> DumpAndReadBack(
+    const TypeNameDeduplicator& deduplicator) {
   std::string json;
-  convertable.AppendAsTraceFormat(&json);
+  deduplicator.AppendAsTraceFormat(&json);
   return JSONReader::Read(json);
 }
 
+// Inserts a single type name into a new TypeNameDeduplicator instance and
+// checks if the value gets inserted and the exported value for |type_name| is
+// the same as |expected_value|.
+void TestInsertTypeAndReadback(const char* type_name,
+                               const char* expected_value) {
+  std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
+  ASSERT_EQ(1, dedup->Insert(type_name));
+
+  std::unique_ptr<Value> type_names = DumpAndReadBack(*dedup);
+  ASSERT_NE(nullptr, type_names);
+
+  const DictionaryValue* dictionary;
+  ASSERT_TRUE(type_names->GetAsDictionary(&dictionary));
+
+  // When the type name was inserted, it got ID 1. The exported key "1"
+  // should be equal to |expected_value|.
+  std::string value;
+  ASSERT_TRUE(dictionary->GetString("1", &value));
+  ASSERT_EQ(expected_value, value);
+}
+
+}  // namespace
+
 TEST(TypeNameDeduplicatorTest, Deduplication) {
   // The type IDs should be like this:
   // 0: [unknown]
@@ -34,7 +67,7 @@
   // 2: bool
   // 3: string
 
-  scoped_refptr<TypeNameDeduplicator> dedup = new TypeNameDeduplicator;
+  std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(kInt));
   ASSERT_EQ(2, dedup->Insert(kBool));
   ASSERT_EQ(3, dedup->Insert(kString));
@@ -49,22 +82,14 @@
 }
 
 TEST(TypeNameDeduplicatorTest, EscapeTypeName) {
-  scoped_refptr<TypeNameDeduplicator> dedup = new TypeNameDeduplicator;
-  ASSERT_EQ(1, dedup->Insert(kNeedsEscape));
-
   // Reading json should not fail, because the type name should have been
-  // escaped properly.
-  scoped_ptr<Value> type_names = DumpAndReadBack(*dedup);
-  ASSERT_NE(nullptr, type_names);
+  // escaped properly and exported value should contain quotes.
+  TestInsertTypeAndReadback(kNeedsEscape, kNeedsEscape);
+}
 
-  const DictionaryValue* dictionary;
-  ASSERT_TRUE(type_names->GetAsDictionary(&dictionary));
-
-  // When the type name was inserted, it got ID 1. The exported key "1"
-  // should contain the name, with quotes.
-  std::string type_name;
-  ASSERT_TRUE(dictionary->GetString("1", &type_name));
-  ASSERT_EQ("\"quotes\"", type_name);
+TEST(TypeNameDeduplicatorTest, TestExtractFileName) {
+  // The exported value for passed file name should be the folders in the path.
+  TestInsertTypeAndReadback(kTaskFileName, kTaskPath);
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 229a8c1..c3d3258 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -7,7 +7,14 @@
 #include <stddef.h>
 
 #include "base/allocator/allocator_extension.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/features.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_allocation_register.h"
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
 #include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
 
 #if defined(OS_MACOSX)
@@ -19,6 +26,65 @@
 namespace base {
 namespace trace_event {
 
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+namespace {
+
+using allocator::AllocatorDispatch;
+
+void* HookAlloc(const AllocatorDispatch* self, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_function(next, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_zero_initialized_function(next, n, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
+  return ptr;
+}
+
+void* HookllocAligned(const AllocatorDispatch* self,
+                      size_t alignment,
+                      size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_aligned_function(next, alignment, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->realloc_function(next, address, size);
+  MallocDumpProvider::GetInstance()->RemoveAllocation(address);
+  if (size > 0)  // realloc(size == 0) means free().
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void HookFree(const AllocatorDispatch* self, void* address) {
+  if (address)
+    MallocDumpProvider::GetInstance()->RemoveAllocation(address);
+  const AllocatorDispatch* const next = self->next;
+  next->free_function(next, address);
+}
+
+AllocatorDispatch g_allocator_hooks = {
+    &HookAlloc,         /* alloc_function */
+    &HookZeroInitAlloc, /* alloc_zero_initialized_function */
+    &HookllocAligned,   /* alloc_aligned_function */
+    &HookRealloc,       /* realloc_function */
+    &HookFree,          /* free_function */
+    nullptr,            /* next */
+};
+
+}  // namespace
+#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+
 // static
 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
 
@@ -28,13 +94,14 @@
                    LeakySingletonTraits<MallocDumpProvider>>::get();
 }
 
-MallocDumpProvider::MallocDumpProvider() {}
+MallocDumpProvider::MallocDumpProvider()
+    : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {}
 
 MallocDumpProvider::~MallocDumpProvider() {}
 
 // Called at trace dump point time. Creates a snapshot the memory counters for
 // the current process.
-bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& /* args */,
+bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                                       ProcessMemoryDump* pmd) {
   size_t total_virtual_size = 0;
   size_t resident_size = 0;
@@ -97,8 +164,95 @@
                           resident_size - allocated_objects_size);
   }
 
+  // Heap profiler dumps.
+  if (!heap_profiler_enabled_)
+    return true;
+
+  // The dumps of the heap profiler should be created only when heap profiling
+  // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested.
+  // However, when enabled, the overhead of the heap profiler should be always
+  // reported to avoid oscillations of the malloc total in LIGHT dumps.
+
+  tid_dumping_heap_ = PlatformThread::CurrentId();
+  // At this point the Insert/RemoveAllocation hooks will ignore this thread.
+  // Enclosing all the temporary data structures in a scope, so that the heap
+  // profiler does not see unbalanced malloc/free calls from these containers.
+  {
+    TraceEventMemoryOverhead overhead;
+    hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
+    {
+      AutoLock lock(allocation_register_lock_);
+      if (allocation_register_) {
+        if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+          for (const auto& alloc_size : *allocation_register_) {
+            AllocationMetrics& metrics = metrics_by_context[alloc_size.context];
+            metrics.size += alloc_size.size;
+            metrics.count++;
+          }
+        }
+        allocation_register_->EstimateTraceMemoryOverhead(&overhead);
+      }
+    }  // lock(allocation_register_lock_)
+    pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
+  }
+  tid_dumping_heap_ = kInvalidThreadId;
+
   return true;
 }
 
+void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+  if (enabled) {
+    {
+      AutoLock lock(allocation_register_lock_);
+      allocation_register_.reset(new AllocationRegister());
+    }
+    allocator::InsertAllocatorDispatch(&g_allocator_hooks);
+  } else {
+    AutoLock lock(allocation_register_lock_);
+    allocation_register_.reset();
+    // Insert/RemoveAllocation below will no-op if the register is torn down.
+    // Once disabled, heap profiling will not be re-enabled anymore for the
+    // lifetime of the process.
+  }
+#endif
+  heap_profiler_enabled_ = enabled;
+}
+
+void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
+  // CurrentId() can be a slow operation (crbug.com/497226). This apparently
+  // redundant condition short-circuits the CurrentId() calls when unnecessary.
+  if (tid_dumping_heap_ != kInvalidThreadId &&
+      tid_dumping_heap_ == PlatformThread::CurrentId())
+    return;
+
+  // AllocationContextTracker will return nullptr when called re-entrantly.
+  // This is the case of GetInstanceForCurrentThread() being called for the
+  // first time, which causes a new() inside the tracker which re-enters the
+  // heap profiler, in which case we just want to early out.
+  auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
+  if (!tracker)
+    return;
+  AllocationContext context = tracker->GetContextSnapshot();
+
+  AutoLock lock(allocation_register_lock_);
+  if (!allocation_register_)
+    return;
+
+  allocation_register_->Insert(address, size, context);
+}
+
+void MallocDumpProvider::RemoveAllocation(void* address) {
+  // No re-entrancy is expected here as none of the calls below should
+  // cause a free() (|allocation_register_| does its own heap management).
+  if (tid_dumping_heap_ != kInvalidThreadId &&
+      tid_dumping_heap_ == PlatformThread::CurrentId())
+    return;
+  AutoLock lock(allocation_register_lock_);
+  if (!allocation_register_)
+    return;
+  allocation_register_->Remove(address);
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/malloc_dump_provider.h b/base/trace_event/malloc_dump_provider.h
index 63fc1b0..4746cf5 100644
--- a/base/trace_event/malloc_dump_provider.h
+++ b/base/trace_event/malloc_dump_provider.h
@@ -6,9 +6,12 @@
 #define BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
 
 #include <istream>
+#include <memory>
 
 #include "base/macros.h"
 #include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "build/build_config.h"
 
@@ -20,6 +23,8 @@
 namespace base {
 namespace trace_event {
 
+class AllocationRegister;
+
 // Dump provider which collects process-wide memory stats.
 class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
  public:
@@ -33,12 +38,28 @@
   bool OnMemoryDump(const MemoryDumpArgs& args,
                     ProcessMemoryDump* pmd) override;
 
+  void OnHeapProfilingEnabled(bool enabled) override;
+
+  // For heap profiling.
+  void InsertAllocation(void* address, size_t size);
+  void RemoveAllocation(void* address);
+
  private:
   friend struct DefaultSingletonTraits<MallocDumpProvider>;
 
   MallocDumpProvider();
   ~MallocDumpProvider() override;
 
+  // For heap profiling.
+  bool heap_profiler_enabled_;
+  std::unique_ptr<AllocationRegister> allocation_register_;
+  Lock allocation_register_lock_;
+
+  // When in OnMemoryDump(), this contains the current thread ID.
+  // This is to prevent re-entrancy in the heap profiler when the heap dump
+  // generation is malloc/new-ing for its own bookkeeping data structures.
+  PlatformThreadId tid_dumping_heap_;
+
   DISALLOW_COPY_AND_ASSIGN(MallocDumpProvider);
 };
 
diff --git a/base/trace_event/memory_allocator_dump.cc b/base/trace_event/memory_allocator_dump.cc
index 5c5af7e..7583763 100644
--- a/base/trace_event/memory_allocator_dump.cc
+++ b/base/trace_event/memory_allocator_dump.cc
@@ -28,7 +28,8 @@
     : absolute_name_(absolute_name),
       process_memory_dump_(process_memory_dump),
       attributes_(new TracedValue),
-      guid_(guid) {
+      guid_(guid),
+      flags_(Flags::DEFAULT) {
   // The |absolute_name| cannot be empty.
   DCHECK(!absolute_name.empty());
 
@@ -79,6 +80,13 @@
 void MemoryAllocatorDump::AddString(const char* name,
                                     const char* units,
                                     const std::string& value) {
+  // String attributes are disabled in background mode.
+  if (process_memory_dump_->dump_args().level_of_detail ==
+      MemoryDumpLevelOfDetail::BACKGROUND) {
+    NOTREACHED();
+    return;
+  }
+
   attributes_->BeginDictionary(name);
   attributes_->SetString("type", kTypeString);
   attributes_->SetString("units", units);
@@ -90,6 +98,8 @@
   value->BeginDictionaryWithCopiedName(absolute_name_);
   value->SetString("guid", guid_.ToString());
   value->SetValue("attrs", *attributes_);
+  if (flags_)
+    value->SetInteger("flags", flags_);
   value->EndDictionary();  // "allocator_name/heap_subheap": { ... }
 }
 
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index 6c514fa..7d10236 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -7,12 +7,12 @@
 
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "base/trace_event/memory_allocator_dump_guid.h"
 #include "base/values.h"
 
@@ -26,6 +26,13 @@
 // Data model for user-land memory allocator dumps.
 class BASE_EXPORT MemoryAllocatorDump {
  public:
+  enum Flags {
+    DEFAULT = 0,
+
+    // A dump marked weak will be discarded by TraceViewer.
+    WEAK = 1 << 0,
+  };
+
   // MemoryAllocatorDump is owned by ProcessMemoryDump.
   MemoryAllocatorDump(const std::string& absolute_name,
                       ProcessMemoryDump* process_memory_dump,
@@ -68,6 +75,11 @@
     return process_memory_dump_;
   }
 
+  // Use enum Flags to set values.
+  void set_flags(int flags) { flags_ |= flags; }
+  void clear_flags(int flags) { flags_ &= ~flags; }
+  int flags() { return flags_; }
+
   // |guid| is an optional global dump identifier, unique across all processes
   // within the scope of a global dump. It is only required when using the
   // graph APIs (see TODO_method_name) to express retention / suballocation or
@@ -81,8 +93,9 @@
  private:
   const std::string absolute_name_;
   ProcessMemoryDump* const process_memory_dump_;  // Not owned (PMD owns this).
-  scoped_refptr<TracedValue> attributes_;
+  std::unique_ptr<TracedValue> attributes_;
   MemoryAllocatorDumpGuid guid_;
+  int flags_;  // See enum Flags.
 
   // A local buffer for Sprintf conversion on fastpath. Avoids allocating
   // temporary strings on each AddScalar() call.
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
index d1cfe91..1bf9715 100644
--- a/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -52,11 +52,12 @@
   }
 };
 
-scoped_ptr<Value> CheckAttribute(const MemoryAllocatorDump* dump,
-                                 const std::string& name,
-                                 const char* expected_type,
-                                 const char* expected_units) {
-  scoped_ptr<Value> raw_attrs = dump->attributes_for_testing()->ToBaseValue();
+std::unique_ptr<Value> CheckAttribute(const MemoryAllocatorDump* dump,
+                                      const std::string& name,
+                                      const char* expected_type,
+                                      const char* expected_units) {
+  std::unique_ptr<Value> raw_attrs =
+      dump->attributes_for_testing()->ToBaseValue();
   DictionaryValue* args = nullptr;
   DictionaryValue* arg = nullptr;
   std::string arg_value;
@@ -68,7 +69,7 @@
   EXPECT_TRUE(arg->GetString("units", &arg_value));
   EXPECT_EQ(expected_units, arg_value);
   EXPECT_TRUE(arg->Get("value", &out_value));
-  return out_value ? out_value->CreateDeepCopy() : scoped_ptr<Value>();
+  return out_value ? out_value->CreateDeepCopy() : std::unique_ptr<Value>();
 }
 
 void CheckString(const MemoryAllocatorDump* dump,
@@ -104,7 +105,7 @@
 }  // namespace
 
 TEST(MemoryAllocatorDumpTest, GuidGeneration) {
-  scoped_ptr<MemoryAllocatorDump> mad(
+  std::unique_ptr<MemoryAllocatorDump> mad(
       new MemoryAllocatorDump("foo", nullptr, MemoryAllocatorDumpGuid(0x42u)));
   ASSERT_EQ("42", mad->guid().ToString());
 
@@ -128,8 +129,8 @@
 
 TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
   FakeMemoryAllocatorDumpProvider fmadp;
-  ProcessMemoryDump pmd(new MemoryDumpSessionState(nullptr, nullptr));
   MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
 
   fmadp.OnMemoryDump(dump_args, &pmd);
 
@@ -167,7 +168,7 @@
   ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameObjectCount));
 
   // Check that the AsValueInfo doesn't hit any DCHECK.
-  scoped_refptr<TracedValue> traced_value(new TracedValue());
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
   pmd.AsValueInto(traced_value.get());
 }
 
@@ -175,7 +176,8 @@
 #if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
 TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
   FakeMemoryAllocatorDumpProvider fmadp;
-  ProcessMemoryDump pmd(new MemoryDumpSessionState(nullptr, nullptr));
+  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
+  ProcessMemoryDump pmd(new MemoryDumpSessionState, dump_args);
   pmd.CreateAllocatorDump("foo_allocator");
   pmd.CreateAllocatorDump("bar_allocator/heap");
   ASSERT_DEATH(pmd.CreateAllocatorDump("foo_allocator"), "");
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index aa81e00..eed070a 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -11,26 +11,24 @@
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/compiler_specific.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/debug/debugging_flags.h"
+#include "base/debug/stack_trace.h"
+#include "base/memory/ptr_util.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
 #include "base/trace_event/malloc_dump_provider.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
 #include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
 
-#if !defined(OS_NACL)
-#include "base/trace_event/process_memory_totals_dump_provider.h"
-#endif
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-#include "base/trace_event/process_memory_maps_dump_provider.h"
-#endif
-
 #if defined(OS_ANDROID)
 #include "base/trace_event/java_heap_dump_provider_android.h"
 #endif
@@ -49,27 +47,8 @@
 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
 
 StaticAtomicSequenceNumber g_next_guid;
-uint32_t g_periodic_dumps_count = 0;
-uint32_t g_heavy_dumps_rate = 0;
 MemoryDumpManager* g_instance_for_testing = nullptr;
 
-void RequestPeriodicGlobalDump() {
-  MemoryDumpLevelOfDetail level_of_detail;
-  if (g_heavy_dumps_rate == 0) {
-    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
-  } else {
-    level_of_detail = g_periodic_dumps_count == 0
-                          ? MemoryDumpLevelOfDetail::DETAILED
-                          : MemoryDumpLevelOfDetail::LIGHT;
-
-    if (++g_periodic_dumps_count == g_heavy_dumps_rate)
-      g_periodic_dumps_count = 0;
-  }
-
-  MemoryDumpManager::GetInstance()->RequestGlobalDump(
-      MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
 // Callback wrapper to hook upon the completion of RequestGlobalDump() and
 // inject trace markers.
 void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -85,6 +64,33 @@
   }
 }
 
+// Proxy class which wraps a ConvertableToTraceFormat owned by the
+// |session_state| into a proxy object that can be added to the trace event log.
+// This is to solve the problem that the MemoryDumpSessionState is refcounted
+// but the tracing subsystem wants a std::unique_ptr<ConvertableToTraceFormat>.
+template <typename T>
+struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
+  using GetterFunctPtr = T* (MemoryDumpSessionState::*)() const;
+
+  SessionStateConvertableProxy(
+      scoped_refptr<MemoryDumpSessionState> session_state,
+      GetterFunctPtr getter_function)
+      : session_state(session_state), getter_function(getter_function) {}
+
+  void AppendAsTraceFormat(std::string* out) const override {
+    return (session_state.get()->*getter_function)()->AppendAsTraceFormat(out);
+  }
+
+  void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) override {
+    return (session_state.get()->*getter_function)()
+        ->EstimateTraceMemoryOverhead(overhead);
+  }
+
+  scoped_refptr<MemoryDumpSessionState> session_state;
+  GetterFunctPtr const getter_function;
+};
+
 }  // namespace
 
 // static
@@ -92,6 +98,9 @@
     TRACE_DISABLED_BY_DEFAULT("memory-infra");
 
 // static
+const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump";
+
+// static
 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
 
 // static
@@ -126,22 +135,56 @@
       is_coordinator_(false),
       memory_tracing_enabled_(0),
       tracing_process_id_(kInvalidTracingProcessId),
-      dumper_registrations_ignored_for_testing_(false) {
+      dumper_registrations_ignored_for_testing_(false),
+      heap_profiling_enabled_(false) {
   g_next_guid.GetNext();  // Make sure that first guid is not zero.
 
-  heap_profiling_enabled_ = CommandLine::InitializedForCurrentProcess()
-                                ? CommandLine::ForCurrentProcess()->HasSwitch(
-                                      switches::kEnableHeapProfiling)
-                                : false;
-
-  if (heap_profiling_enabled_)
-    AllocationContextTracker::SetCaptureEnabled(true);
+  // At this point the command line may not be initialized but we try to
+  // enable the heap profiler to capture allocations as soon as possible.
+  EnableHeapProfilingIfNeeded();
 }
 
 MemoryDumpManager::~MemoryDumpManager() {
   TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
 }
 
+void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
+  if (heap_profiling_enabled_)
+    return;
+
+  if (!CommandLine::InitializedForCurrentProcess() ||
+      !CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kEnableHeapProfiling))
+    return;
+
+  std::string profiling_mode = CommandLine::ForCurrentProcess()
+      ->GetSwitchValueASCII(switches::kEnableHeapProfiling);
+  if (profiling_mode == "") {
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+  }
+  else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
+#if HAVE_TRACE_STACK_FRAME_POINTERS && \
+    (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
+    // We need frame pointers for native tracing to work, and they are
+    // enabled in profiling and debug builds.
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::NATIVE_STACK);
+#else
+    CHECK(false) << "'" << profiling_mode << "' mode for "
+                 << switches::kEnableHeapProfiling << " flag is not supported "
+                 << "for this platform / build type.";
+#endif
+  } else {
+    CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
+               << switches::kEnableHeapProfiling << " flag.";
+  }
+
+  for (auto mdp : dump_providers_)
+    mdp->dump_provider->OnHeapProfilingEnabled(true);
+  heap_profiling_enabled_ = true;
+}
+
 void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
                                    bool is_coordinator) {
   {
@@ -150,23 +193,14 @@
     DCHECK(!delegate_);
     delegate_ = delegate;
     is_coordinator_ = is_coordinator;
+    EnableHeapProfilingIfNeeded();
   }
 
 // Enable the core dump providers.
-#if !defined(OS_NACL)
-  RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance(),
-                       "ProcessMemoryTotals", nullptr);
-#endif
-
 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
   RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
 #endif
 
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-  RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance(),
-                       "ProcessMemoryMaps", nullptr);
-#endif
-
 #if defined(OS_ANDROID)
   RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                        nullptr);
@@ -188,13 +222,45 @@
 void MemoryDumpManager::RegisterDumpProvider(
     MemoryDumpProvider* mdp,
     const char* name,
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+    scoped_refptr<SingleThreadTaskRunner> task_runner,
+    MemoryDumpProvider::Options options) {
+  options.dumps_on_single_thread_task_runner = true;
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProvider(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  // Set |dumps_on_single_thread_task_runner| to true because all providers
+  // without a task runner are run on the dump thread.
+  MemoryDumpProvider::Options options;
+  options.dumps_on_single_thread_task_runner = true;
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    MemoryDumpProvider::Options options) {
+  DCHECK(task_runner);
+  options.dumps_on_single_thread_task_runner = false;
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderInternal(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    scoped_refptr<SequencedTaskRunner> task_runner,
     const MemoryDumpProvider::Options& options) {
   if (dumper_registrations_ignored_for_testing_)
     return;
 
+  bool whitelisted_for_background_mode = IsMemoryDumpProviderWhitelisted(name);
   scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
-      new MemoryDumpProviderInfo(mdp, name, task_runner, options);
+      new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
+                                 whitelisted_for_background_mode);
 
   {
     AutoLock lock(lock_);
@@ -209,26 +275,19 @@
     mdp->OnHeapProfilingEnabled(true);
 }
 
-void MemoryDumpManager::RegisterDumpProvider(
-    MemoryDumpProvider* mdp,
-    const char* name,
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
-  RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options());
-}
-
 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
   UnregisterDumpProviderInternal(mdp, false /* delete_async */);
 }
 
 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
-    scoped_ptr<MemoryDumpProvider> mdp) {
+    std::unique_ptr<MemoryDumpProvider> mdp) {
   UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
 }
 
 void MemoryDumpManager::UnregisterDumpProviderInternal(
     MemoryDumpProvider* mdp,
     bool take_mdp_ownership_and_delete_async) {
-  scoped_ptr<MemoryDumpProvider> owned_mdp;
+  std::unique_ptr<MemoryDumpProvider> owned_mdp;
   if (take_mdp_ownership_and_delete_async)
     owned_mdp.reset(mdp);
 
@@ -246,28 +305,29 @@
   if (take_mdp_ownership_and_delete_async) {
     // The MDP will be deleted whenever the MDPInfo struct will, that is either:
     // - At the end of this function, if no dump is in progress.
-    // - In the prologue of the ContinueAsyncProcessDump().
+    // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
+    //   removed from |pending_dump_providers|.
     DCHECK(!(*mdp_iter)->owned_dump_provider);
     (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
   } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
     // If you hit this DCHECK, your dump provider has a bug.
     // Unregistration of a MemoryDumpProvider is safe only if:
-    // - The MDP has specified a thread affinity (via task_runner()) AND
-    //   the unregistration happens on the same thread (so the MDP cannot
+    // - The MDP has specified a sequenced task runner affinity AND the
+    //   unregistration happens on the same task runner. So that the MDP cannot
     //   unregister and be in the middle of a OnMemoryDump() at the same time.
-    // - The MDP has NOT specified a thread affinity and its ownership is
+    // - The MDP has NOT specified a task runner affinity and its ownership is
     //   transferred via UnregisterAndDeleteDumpProviderSoon().
     // In all the other cases, it is not possible to guarantee that the
     // unregistration will not race with OnMemoryDump() calls.
     DCHECK((*mdp_iter)->task_runner &&
-           (*mdp_iter)->task_runner->BelongsToCurrentThread())
+           (*mdp_iter)->task_runner->RunsTasksOnCurrentThread())
         << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
         << "unregister itself in a racy way. Please file a crbug.";
   }
 
   // The MDPInfo instance can still be referenced by the
   // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
-  // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump
+  // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
   // to just skip it, without actually invoking the |mdp|, which might be
   // destroyed by the caller soon after this method returns.
   (*mdp_iter)->disabled = true;
@@ -278,8 +338,13 @@
     MemoryDumpType dump_type,
     MemoryDumpLevelOfDetail level_of_detail,
     const MemoryDumpCallback& callback) {
-  // Bail out immediately if tracing is not enabled at all.
-  if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
+  // Bail out immediately if tracing is not enabled at all or if the dump mode
+  // is not allowed.
+  if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
+      !IsDumpModeAllowed(level_of_detail)) {
+    VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
+            << " tracing category is not enabled or the requested dump mode is "
+               "not allowed by trace config.";
     if (!callback.is_null())
       callback.Run(0u /* guid */, false /* success */);
     return;
@@ -323,57 +388,77 @@
   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
                                     TRACE_ID_MANGLE(args.dump_guid));
 
-  scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
+  // If argument filter is enabled then only background mode dumps should be
+  // allowed. In case the trace config passed for a background tracing session
+  // missed the allowed modes argument, it crashes here instead of creating
+  // unexpected dumps.
+  if (TraceLog::GetInstance()
+          ->GetCurrentTraceConfig()
+          .IsArgumentFilterEnabled()) {
+    CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
+  }
+
+  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
   {
     AutoLock lock(lock_);
-    pmd_async_state.reset(
-        new ProcessMemoryDumpAsyncState(args, dump_providers_, session_state_,
-                                        callback, dump_thread_->task_runner()));
+
+    // |dump_thread_| can be nullptr if tracing was disabled before reaching
+    // here. SetupNextMemoryDump() is robust enough to tolerate it and will
+    // NACK the dump.
+    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
+        args, dump_providers_, session_state_, callback,
+        dump_thread_ ? dump_thread_->task_runner() : nullptr));
+
+    // Safety check to prevent reaching here without calling RequestGlobalDump,
+    // with disallowed modes. If |session_state_| is null then tracing is
+    // disabled.
+    CHECK(!session_state_ ||
+          session_state_->memory_dump_config().allowed_dump_modes.count(
+              args.level_of_detail));
   }
 
   TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
                          TRACE_ID_MANGLE(args.dump_guid),
                          TRACE_EVENT_FLAG_FLOW_OUT);
 
-  // Start the thread hop. |dump_providers_| are kept sorted by thread, so
-  // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
-  // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
-  ContinueAsyncProcessDump(pmd_async_state.release());
+  // Start the process dump. This involves task runner hops as specified by the
+  // MemoryDumpProvider(s) in RegisterDumpProvider()).
+  SetupNextMemoryDump(std::move(pmd_async_state));
 }
 
-// At most one ContinueAsyncProcessDump() can be active at any time for a given
-// PMD, regardless of status of the |lock_|. |lock_| is used here purely to
-// ensure consistency w.r.t. (un)registrations of |dump_providers_|.
-// The linearization of dump providers' OnMemoryDump invocations is achieved by
-// means of subsequent PostTask(s).
-//
-// 1) Prologue:
-//   - If this was the last hop, create a trace event, add it to the trace
-//     and finalize (invoke callback).
-//   - Check if we are on the right thread. If not hop and continue there.
-//   - Check if the dump provider is disabled, if so skip the dump.
-// 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
-// 3) Epilogue:
-//   - Unregister the dump provider if it failed too many times consecutively.
-//   - Pop() the MDP from the |pending_dump_providers| list, eventually
-//     destroying the MDPInfo if that was unregistered in the meantime.
-void MemoryDumpManager::ContinueAsyncProcessDump(
-    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+// PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A
+// PostTask is always required for a generic SequencedTaskRunner to ensure that
+// no other task is running on it concurrently. SetupNextMemoryDump() and
+// InvokeOnMemoryDump() are called alternatively which linearizes the dump
+// provider's OnMemoryDump invocations.
+// At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be
+// active at any time for a given PMD, regardless of status of the |lock_|.
+// |lock_| is used in these functions purely to ensure consistency w.r.t.
+// (un)registrations of |dump_providers_|.
+void MemoryDumpManager::SetupNextMemoryDump(
+    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+  HEAP_PROFILER_SCOPED_IGNORE;
   // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
   // in the PostTask below don't end up registering their own dump providers
   // (for discounting trace memory overhead) while holding the |lock_|.
   TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
 
-  // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
-  // why it isn't is because of the corner case logic of |did_post_task| below,
-  // which needs to take back the ownership of the |pmd_async_state| when a
-  // thread goes away and consequently the PostTask() fails.
-  // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
-  // to prevent accidental leaks. Using a scoped_ptr would prevent us to to
-  // skip the hop and move on. Hence the manual naked -> scoped ptr juggling.
-  auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
-  owned_pmd_async_state = nullptr;
-
+  // |dump_thread_| might be destroyed before getting to this point.
+  // It means that tracing was disabled right before starting this dump.
+  // Anyway either tracing is stopped or this was the last hop, create a trace
+  // event, add it to the trace and finalize process dump invoking the callback.
+  if (!pmd_async_state->dump_thread_task_runner.get()) {
+    if (pmd_async_state->pending_dump_providers.empty()) {
+      VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+              << " before finalizing the dump";
+    } else {
+      VLOG(1) << kLogPrefix << " failed because dump thread was destroyed"
+              << " before dumping "
+              << pmd_async_state->pending_dump_providers.back().get()->name;
+    }
+    pmd_async_state->dump_successful = false;
+    pmd_async_state->pending_dump_providers.clear();
+  }
   if (pmd_async_state->pending_dump_providers.empty())
     return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
 
@@ -382,60 +467,105 @@
   MemoryDumpProviderInfo* mdpinfo =
       pmd_async_state->pending_dump_providers.back().get();
 
-  // If the dump provider did not specify a thread affinity, dump on
-  // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this
-  // point (if tracing was disabled in the meanwhile). In such case the
-  // PostTask() below will fail, but |task_runner| should always be non-null.
-  SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get();
-  if (!task_runner)
-    task_runner = pmd_async_state->dump_thread_task_runner.get();
-
-  bool post_task_failed = false;
-  if (!task_runner->BelongsToCurrentThread()) {
-    // It's time to hop onto another thread.
-    post_task_failed = !task_runner->PostTask(
-        FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
-                        Unretained(this), Unretained(pmd_async_state.get())));
-    if (!post_task_failed) {
-      // Ownership is tranferred to the next ContinueAsyncProcessDump().
-      ignore_result(pmd_async_state.release());
-      return;
-    }
+  // If we are in background tracing, we should invoke only the whitelisted
+  // providers. Ignore other providers and continue.
+  if (pmd_async_state->req_args.level_of_detail ==
+          MemoryDumpLevelOfDetail::BACKGROUND &&
+      !mdpinfo->whitelisted_for_background_mode) {
+    pmd_async_state->pending_dump_providers.pop_back();
+    return SetupNextMemoryDump(std::move(pmd_async_state));
   }
 
-  // At this point either:
-  // - The MDP has a task runner affinity and we are on the right thread.
-  // - The MDP has a task runner affinity but the underlying thread is gone,
-  //   hence the above |post_task_failed| == true.
-  // - The MDP does NOT have a task runner affinity. A locked access is required
-  //   to R/W |disabled| (for the UnregisterAndDeleteDumpProviderSoon() case).
-  bool should_dump;
-  const char* disabled_reason = nullptr;
-  {
+  // If the dump provider did not specify a task runner affinity, dump on
+  // |dump_thread_| which is already checked above for presence.
+  SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
+  if (!task_runner) {
+    DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
+    task_runner = pmd_async_state->dump_thread_task_runner.get();
+    DCHECK(task_runner);
+  }
+
+  if (mdpinfo->options.dumps_on_single_thread_task_runner &&
+      task_runner->RunsTasksOnCurrentThread()) {
+    // If |dumps_on_single_thread_task_runner| is true then no PostTask is
+    // required if we are on the right thread.
+    return InvokeOnMemoryDump(pmd_async_state.release());
+  }
+
+  bool did_post_task = task_runner->PostTask(
+      FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this),
+                      Unretained(pmd_async_state.get())));
+
+  if (did_post_task) {
+    // Ownership is transferred to InvokeOnMemoryDump().
+    ignore_result(pmd_async_state.release());
+    return;
+  }
+
+  // PostTask usually fails only if the process or thread is shut down. So, the
+  // dump provider is disabled here. But, don't disable unbound dump providers.
+  // The utility thread is normally shut down when disabling the trace and
+  // getting here in this case is expected.
+  if (mdpinfo->task_runner) {
+    LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+               << "\". Failed to post task on the task runner provided.";
+
+    // A locked access is required to R/W |disabled| (for the
+    // UnregisterAndDeleteDumpProviderSoon() case).
     AutoLock lock(lock_);
-    if (!mdpinfo->disabled) {
-      if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
-        mdpinfo->disabled = true;
-        disabled_reason =
-            "Dump failure, possibly related with sandboxing (crbug.com/461788)."
-            " Try --no-sandbox.";
-      } else if (post_task_failed) {
-        disabled_reason = "The thread it was meant to dump onto is gone.";
-        mdpinfo->disabled = true;
-      }
+    mdpinfo->disabled = true;
+  }
+
+  // PostTask failed. Ignore the dump provider and continue.
+  pmd_async_state->pending_dump_providers.pop_back();
+  SetupNextMemoryDump(std::move(pmd_async_state));
+}
+
+// This function is called on the right task runner for current MDP. It is
+// either the task runner specified by MDP or |dump_thread_task_runner| if the
+// MDP did not specify task runner. Invokes the dump provider's OnMemoryDump()
+// (unless disabled).
+void MemoryDumpManager::InvokeOnMemoryDump(
+    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+  HEAP_PROFILER_SCOPED_IGNORE;
+  // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
+  // why it isn't is because of the corner case logic of |did_post_task|
+  // above, which needs to take back the ownership of the |pmd_async_state| when
+  // the PostTask() fails.
+  // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
+  // to prevent accidental leaks. Using a scoped_ptr would prevent us from
+  // skipping the hop and moving on. Hence the naked -> scoped ptr juggling.
+  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
+  owned_pmd_async_state = nullptr;
+
+  // Read MemoryDumpProviderInfo thread safety considerations in
+  // memory_dump_manager.h when accessing |mdpinfo| fields.
+  MemoryDumpProviderInfo* mdpinfo =
+      pmd_async_state->pending_dump_providers.back().get();
+
+  DCHECK(!mdpinfo->task_runner ||
+         mdpinfo->task_runner->RunsTasksOnCurrentThread());
+
+  bool should_dump;
+  {
+    // A locked access is required to R/W |disabled| (for the
+    // UnregisterAndDeleteDumpProviderSoon() case).
+    AutoLock lock(lock_);
+
+    // Unregister the dump provider if it failed too many times consecutively.
+    if (!mdpinfo->disabled &&
+        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
+      mdpinfo->disabled = true;
+      LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+                 << "\". Dump failed multiple times consecutively.";
     }
     should_dump = !mdpinfo->disabled;
-  }
-
-  if (disabled_reason) {
-    LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name << "\". "
-               << disabled_reason;
-  }
+  }  // AutoLock lock(lock_);
 
   if (should_dump) {
     // Invoke the dump provider.
     TRACE_EVENT_WITH_FLOW1(kTraceCategory,
-                           "MemoryDumpManager::ContinueAsyncProcessDump",
+                           "MemoryDumpManager::InvokeOnMemoryDump",
                            TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
                            TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                            "dump_provider.name", mdpinfo->name);
@@ -444,21 +574,23 @@
     // process), non-zero when the coordinator process creates dumps on behalf
     // of child processes (see crbug.com/461788).
     ProcessId target_pid = mdpinfo->options.target_pid;
-    ProcessMemoryDump* pmd =
-        pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid);
     MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail};
+    ProcessMemoryDump* pmd =
+        pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid,
+                                                                  args);
     bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
     mdpinfo->consecutive_failures =
         dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
-  }  // if (!mdpinfo->disabled)
+  }
 
   pmd_async_state->pending_dump_providers.pop_back();
-  ContinueAsyncProcessDump(pmd_async_state.release());
+  SetupNextMemoryDump(std::move(pmd_async_state));
 }
 
 // static
 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
-    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+  HEAP_PROFILER_SCOPED_IGNORE;
   DCHECK(pmd_async_state->pending_dump_providers.empty());
   const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
   if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
@@ -477,25 +609,35 @@
   for (const auto& kv : pmd_async_state->process_dumps) {
     ProcessId pid = kv.first;  // kNullProcessId for the current process.
     ProcessMemoryDump* process_memory_dump = kv.second.get();
-    TracedValue* traced_value = new TracedValue();
-    scoped_refptr<ConvertableToTraceFormat> event_value(traced_value);
-    process_memory_dump->AsValueInto(traced_value);
+    std::unique_ptr<TracedValue> traced_value(new TracedValue);
+    process_memory_dump->AsValueInto(traced_value.get());
     traced_value->SetString("level_of_detail",
                             MemoryDumpLevelOfDetailToString(
                                 pmd_async_state->req_args.level_of_detail));
     const char* const event_name =
         MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);
 
+    std::unique_ptr<ConvertableToTraceFormat> event_value(
+        std::move(traced_value));
     TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
         TRACE_EVENT_PHASE_MEMORY_DUMP,
         TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
-        dump_guid, pid, kTraceEventNumArgs, kTraceEventArgNames,
+        trace_event_internal::kGlobalScope, dump_guid, pid,
+        kTraceEventNumArgs, kTraceEventArgNames,
         kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
         TRACE_EVENT_FLAG_HAS_ID);
   }
 
+  bool tracing_still_enabled;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
+  if (!tracing_still_enabled) {
+    pmd_async_state->dump_successful = false;
+    VLOG(1) << kLogPrefix << " failed because tracing was disabled before"
+            << " the dump was completed";
+  }
+
   if (!pmd_async_state->callback.is_null()) {
-    pmd_async_state->callback.Run(dump_guid, true /* success */);
+    pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
     pmd_async_state->callback.Reset();
   }
 
@@ -515,82 +657,71 @@
   TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
 
   // Spin-up the thread used to invoke unbound dump providers.
-  scoped_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
+  std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
   if (!dump_thread->Start()) {
     LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
     return;
   }
 
-  AutoLock lock(lock_);
-
-  DCHECK(delegate_);  // At this point we must have a delegate.
-
-  scoped_refptr<StackFrameDeduplicator> stack_frame_deduplicator = nullptr;
-  scoped_refptr<TypeNameDeduplicator> type_name_deduplicator = nullptr;
-
+  const TraceConfig trace_config =
+      TraceLog::GetInstance()->GetCurrentTraceConfig();
+  scoped_refptr<MemoryDumpSessionState> session_state =
+      new MemoryDumpSessionState;
+  session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
   if (heap_profiling_enabled_) {
     // If heap profiling is enabled, the stack frame deduplicator and type name
     // deduplicator will be in use. Add a metadata events to write the frames
     // and type IDs.
-    stack_frame_deduplicator = new StackFrameDeduplicator;
-    type_name_deduplicator = new TypeNameDeduplicator;
+    session_state->SetStackFrameDeduplicator(
+        WrapUnique(new StackFrameDeduplicator));
+
+    session_state->SetTypeNameDeduplicator(
+        WrapUnique(new TypeNameDeduplicator));
+
     TRACE_EVENT_API_ADD_METADATA_EVENT(
-        "stackFrames", "stackFrames",
-        scoped_refptr<ConvertableToTraceFormat>(stack_frame_deduplicator));
+        TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
+        "stackFrames",
+        WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
+            session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
+
     TRACE_EVENT_API_ADD_METADATA_EVENT(
-        "typeNames", "typeNames",
-        scoped_refptr<ConvertableToTraceFormat>(type_name_deduplicator));
+        TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
+        "typeNames",
+        WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
+            session_state, &MemoryDumpSessionState::type_name_deduplicator)));
   }
 
-  DCHECK(!dump_thread_);
-  dump_thread_ = std::move(dump_thread);
-  session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator,
-                                              type_name_deduplicator);
+  {
+    AutoLock lock(lock_);
 
-  subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+    DCHECK(delegate_);  // At this point we must have a delegate.
+    session_state_ = session_state;
 
-  // TODO(primiano): This is a temporary hack to disable periodic memory dumps
-  // when running memory benchmarks until telemetry uses TraceConfig to
-  // enable/disable periodic dumps. See crbug.com/529184 .
-  if (!is_coordinator_ ||
-      CommandLine::ForCurrentProcess()->HasSwitch(
-          "enable-memory-benchmarking")) {
-    return;
+    DCHECK(!dump_thread_);
+    dump_thread_ = std::move(dump_thread);
+
+    subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
+
+    // TODO(primiano): This is a temporary hack to disable periodic memory dumps
+    // when running memory benchmarks until telemetry uses TraceConfig to
+    // enable/disable periodic dumps. See crbug.com/529184 .
+    if (!is_coordinator_ ||
+        CommandLine::ForCurrentProcess()->HasSwitch(
+            "enable-memory-benchmarking")) {
+      return;
+    }
   }
 
-  // Enable periodic dumps. At the moment the periodic support is limited to at
-  // most one low-detail periodic dump and at most one high-detail periodic
-  // dump. If both are specified the high-detail period must be an integer
-  // multiple of the low-level one.
-  g_periodic_dumps_count = 0;
-  const TraceConfig trace_config =
-      TraceLog::GetInstance()->GetCurrentTraceConfig();
-  const TraceConfig::MemoryDumpConfig& config_list =
-      trace_config.memory_dump_config();
-  if (config_list.empty())
-    return;
-
-  uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
-  uint32_t heavy_dump_period_ms = 0;
-  DCHECK_LE(config_list.size(), 2u);
-  for (const TraceConfig::MemoryDumpTriggerConfig& config : config_list) {
-    DCHECK(config.periodic_interval_ms);
-    if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
-      heavy_dump_period_ms = config.periodic_interval_ms;
-    min_timer_period_ms =
-        std::min(min_timer_period_ms, config.periodic_interval_ms);
-  }
-  DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
-  g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;
-
-  periodic_dump_timer_.Start(FROM_HERE,
-                             TimeDelta::FromMilliseconds(min_timer_period_ms),
-                             base::Bind(&RequestPeriodicGlobalDump));
+  // Enable periodic dumps if necessary.
+  periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
 }
 
 void MemoryDumpManager::OnTraceLogDisabled() {
+  // There might be a memory dump in progress while this happens. Therefore,
+  // ensure that the MDM state which depends on the tracing enabled / disabled
+  // state is always accessed by the dumping methods holding the |lock_|.
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
-  scoped_ptr<Thread> dump_thread;
+  std::unique_ptr<Thread> dump_thread;
   {
     AutoLock lock(lock_);
     dump_thread = std::move(dump_thread_);
@@ -598,12 +729,20 @@
   }
 
   // Thread stops are blocking and must be performed outside of the |lock_|
-  // or will deadlock (e.g., if ContinueAsyncProcessDump() tries to acquire it).
+  // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
   periodic_dump_timer_.Stop();
   if (dump_thread)
     dump_thread->Stop();
 }
 
+bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
+  AutoLock lock(lock_);
+  if (!session_state_)
+    return false;
+  return session_state_->memory_dump_config().allowed_dump_modes.count(
+             dump_mode) != 0;
+}
+
 uint64_t MemoryDumpManager::GetTracingProcessId() const {
   return delegate_->GetTracingProcessId();
 }
@@ -611,14 +750,16 @@
 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
     MemoryDumpProvider* dump_provider,
     const char* name,
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner,
-    const MemoryDumpProvider::Options& options)
+    scoped_refptr<SequencedTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options,
+    bool whitelisted_for_background_mode)
     : dump_provider(dump_provider),
       name(name),
-      task_runner(task_runner),
+      task_runner(std::move(task_runner)),
       options(options),
       consecutive_failures(0),
-      disabled(false) {}
+      disabled(false),
+      whitelisted_for_background_mode(whitelisted_for_background_mode) {}
 
 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
 
@@ -637,14 +778,15 @@
 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
     MemoryDumpRequestArgs req_args,
     const MemoryDumpProviderInfo::OrderedSet& dump_providers,
-    const scoped_refptr<MemoryDumpSessionState>& session_state,
+    scoped_refptr<MemoryDumpSessionState> session_state,
     MemoryDumpCallback callback,
-    const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner)
+    scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner)
     : req_args(req_args),
-      session_state(session_state),
+      session_state(std::move(session_state)),
       callback(callback),
-      callback_task_runner(MessageLoop::current()->task_runner()),
-      dump_thread_task_runner(dump_thread_task_runner) {
+      dump_successful(true),
+      callback_task_runner(ThreadTaskRunnerHandle::Get()),
+      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
   pending_dump_providers.reserve(dump_providers.size());
   pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
 }
@@ -653,14 +795,89 @@
 }
 
 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
-    GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) {
+    GetOrCreateMemoryDumpContainerForProcess(ProcessId pid,
+                                             const MemoryDumpArgs& dump_args) {
   auto iter = process_dumps.find(pid);
   if (iter == process_dumps.end()) {
-    scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state));
+    std::unique_ptr<ProcessMemoryDump> new_pmd(
+        new ProcessMemoryDump(session_state, dump_args));
     iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
   }
   return iter->second.get();
 }
 
+MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
+
+MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
+  Stop();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
+    const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
+  if (triggers_list.empty())
+    return;
+
+  // At the moment the periodic support is limited to at most one periodic
+  // trigger per dump mode. All intervals should be an integer multiple of the
+  // smallest interval specified.
+  periodic_dumps_count_ = 0;
+  uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
+  uint32_t light_dump_period_ms = 0;
+  uint32_t heavy_dump_period_ms = 0;
+  DCHECK_LE(triggers_list.size(), 3u);
+  auto* mdm = MemoryDumpManager::GetInstance();
+  for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
+    DCHECK_NE(0u, config.periodic_interval_ms);
+    switch (config.level_of_detail) {
+      case MemoryDumpLevelOfDetail::BACKGROUND:
+        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
+        break;
+      case MemoryDumpLevelOfDetail::LIGHT:
+        DCHECK_EQ(0u, light_dump_period_ms);
+        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
+        light_dump_period_ms = config.periodic_interval_ms;
+        break;
+      case MemoryDumpLevelOfDetail::DETAILED:
+        DCHECK_EQ(0u, heavy_dump_period_ms);
+        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
+        heavy_dump_period_ms = config.periodic_interval_ms;
+        break;
+    }
+    min_timer_period_ms =
+        std::min(min_timer_period_ms, config.periodic_interval_ms);
+  }
+
+  DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
+  light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
+  DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
+  heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
+
+  timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
+               base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
+                          base::Unretained(this)));
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
+  if (IsRunning()) {
+    timer_.Stop();
+  }
+}
+
+bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
+  return timer_.IsRunning();
+}
+
+void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
+  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+  if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
+    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+  if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
+    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+  ++periodic_dumps_count_;
+
+  MemoryDumpManager::GetInstance()->RequestGlobalDump(
+      MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index b3880af..06b772c 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -40,6 +40,7 @@
 class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
  public:
   static const char* const kTraceCategory;
+  static const char* const kLogPrefix;
 
   // This value is returned as the tracing id of the child processes by
   // GetTracingProcessId() when tracing is not enabled.
@@ -69,19 +70,22 @@
   //  - name: a friendly name (duplicates allowed). Used for debugging and
   //      run-time profiling of memory-infra internals. Must be a long-lived
   //      C string.
-  //  - task_runner: if non-null, all the calls to |mdp| will be
-  //      issued on the given thread. Otherwise, |mdp| should be able to
-  //      handle calls on arbitrary threads.
+  //  - task_runner: either a SingleThreadTaskRunner or SequencedTaskRunner. All
+  //      the calls to |mdp| will be run on the given |task_runner|. If passed
+  //      null |mdp| should be able to handle calls on arbitrary threads.
   //  - options: extra optional arguments. See memory_dump_provider.h.
-  void RegisterDumpProvider(
+  void RegisterDumpProvider(MemoryDumpProvider* mdp,
+                            const char* name,
+                            scoped_refptr<SingleThreadTaskRunner> task_runner);
+  void RegisterDumpProvider(MemoryDumpProvider* mdp,
+                            const char* name,
+                            scoped_refptr<SingleThreadTaskRunner> task_runner,
+                            MemoryDumpProvider::Options options);
+  void RegisterDumpProviderWithSequencedTaskRunner(
       MemoryDumpProvider* mdp,
       const char* name,
-      const scoped_refptr<SingleThreadTaskRunner>& task_runner);
-  void RegisterDumpProvider(
-      MemoryDumpProvider* mdp,
-      const char* name,
-      const scoped_refptr<SingleThreadTaskRunner>& task_runner,
-      const MemoryDumpProvider::Options& options);
+      scoped_refptr<SequencedTaskRunner> task_runner,
+      MemoryDumpProvider::Options options);
   void UnregisterDumpProvider(MemoryDumpProvider* mdp);
 
   // Unregisters an unbound dump provider and takes care about its deletion
@@ -91,7 +95,8 @@
   //  - The |mdp| will be deleted at some point in the near future.
   //  - Its deletion will not happen concurrently with the OnMemoryDump() call.
   // Note that OnMemoryDump() calls can still happen after this method returns.
-  void UnregisterAndDeleteDumpProviderSoon(scoped_ptr<MemoryDumpProvider> mdp);
+  void UnregisterAndDeleteDumpProviderSoon(
+      std::unique_ptr<MemoryDumpProvider> mdp);
 
   // Requests a memory dump. The dump might happen or not depending on the
   // filters and categories specified when enabling tracing.
@@ -111,10 +116,14 @@
   void OnTraceLogEnabled() override;
   void OnTraceLogDisabled() override;
 
+  // Returns true if the dump mode is allowed for current tracing session.
+  bool IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode);
+
   // Returns the MemoryDumpSessionState object, which is shared by all the
   // ProcessMemoryDump and MemoryAllocatorDump instances through all the tracing
   // session lifetime.
-  const scoped_refptr<MemoryDumpSessionState>& session_state() const {
+  const scoped_refptr<MemoryDumpSessionState>& session_state_for_testing()
+      const {
     return session_state_;
   }
 
@@ -153,14 +162,15 @@
   //   inside ProcessMemoryDumpAsyncState is removed.
   // - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
   // - If UnregisterDumpProvider() is called while a dump is in progress, the
-  //   MDPInfo is destroyed in the epilogue of ContinueAsyncProcessDump(), when
-  //   the copy inside ProcessMemoryDumpAsyncState is erase()-d.
+  //   MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
+  //   when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
   // - The non-const fields of MemoryDumpProviderInfo are safe to access only
-  //   in the |task_runner| thread, unless the thread has been destroyed.
+  //   on tasks running in the |task_runner|, unless the thread has been
+  //   destroyed.
   struct MemoryDumpProviderInfo
       : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
-    // Define a total order based on the thread (i.e. |task_runner|) affinity,
-    // so that all MDP belonging to the same thread are adjacent in the set.
+    // Define a total order based on the |task_runner| affinity, so that MDPs
+    // belonging to the same SequencedTaskRunner are adjacent in the set.
     struct Comparator {
       bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
                       const scoped_refptr<MemoryDumpProviderInfo>& b) const;
@@ -168,24 +178,24 @@
     using OrderedSet =
         std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
 
-    MemoryDumpProviderInfo(
-        MemoryDumpProvider* dump_provider,
-        const char* name,
-        const scoped_refptr<SingleThreadTaskRunner>& task_runner,
-        const MemoryDumpProvider::Options& options);
+    MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+                           const char* name,
+                           scoped_refptr<SequencedTaskRunner> task_runner,
+                           const MemoryDumpProvider::Options& options,
+                           bool whitelisted_for_background_mode);
 
     MemoryDumpProvider* const dump_provider;
 
     // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
     // nullptr in all other cases.
-    scoped_ptr<MemoryDumpProvider> owned_dump_provider;
+    std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
 
     // Human readable name, for debugging and testing. Not necessarily unique.
     const char* const name;
 
-    // The task_runner affinity. Can be nullptr, in which case the dump provider
+    // The task runner affinity. Can be nullptr, in which case the dump provider
     // will be invoked on |dump_thread_|.
-    const scoped_refptr<SingleThreadTaskRunner> task_runner;
+    const scoped_refptr<SequencedTaskRunner> task_runner;
 
     // The |options| arg passed to RegisterDumpProvider().
     const MemoryDumpProvider::Options options;
@@ -196,6 +206,9 @@
     // Flagged either by the auto-disable logic or during unregistration.
     bool disabled;
 
+    // True if the dump provider is whitelisted for background mode.
+    const bool whitelisted_for_background_mode;
+
    private:
     friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
     ~MemoryDumpProviderInfo();
@@ -204,25 +217,28 @@
   };
 
   // Holds the state of a process memory dump that needs to be carried over
-  // across threads in order to fulfil an asynchronous CreateProcessDump()
-  // request. At any time exactly one thread owns a ProcessMemoryDumpAsyncState.
+  // across task runners in order to fulfil an asynchronous CreateProcessDump()
+  // request. At any time exactly one task runner owns a
+  // ProcessMemoryDumpAsyncState.
   struct ProcessMemoryDumpAsyncState {
     ProcessMemoryDumpAsyncState(
         MemoryDumpRequestArgs req_args,
         const MemoryDumpProviderInfo::OrderedSet& dump_providers,
-        const scoped_refptr<MemoryDumpSessionState>& session_state,
+        scoped_refptr<MemoryDumpSessionState> session_state,
         MemoryDumpCallback callback,
-        const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner);
+        scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner);
     ~ProcessMemoryDumpAsyncState();
 
     // Gets or creates the memory dump container for the given target process.
-    ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(ProcessId pid);
+    ProcessMemoryDump* GetOrCreateMemoryDumpContainerForProcess(
+        ProcessId pid,
+        const MemoryDumpArgs& dump_args);
 
     // A map of ProcessId -> ProcessMemoryDump, one for each target process
     // being dumped from the current process. Typically each process dumps only
     // for itself, unless dump providers specify a different |target_process| in
     // MemoryDumpProvider::Options.
-    std::map<ProcessId, scoped_ptr<ProcessMemoryDump>> process_dumps;
+    std::map<ProcessId, std::unique_ptr<ProcessMemoryDump>> process_dumps;
 
     // The arguments passed to the initial CreateProcessDump() request.
     const MemoryDumpRequestArgs req_args;
@@ -238,6 +254,9 @@
     // Callback passed to the initial call to CreateProcessDump().
     MemoryDumpCallback callback;
 
+    // The |success| field that will be passed as argument to the |callback|.
+    bool dump_successful;
+
     // The thread on which FinalizeDumpAndAddToTrace() (and hence |callback|)
     // should be invoked. This is the thread on which the initial
     // CreateProcessDump() request was called.
@@ -254,6 +273,31 @@
     DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
   };
 
+  // Sets up periodic memory dump timers to start global dump requests based on
+  // the dump triggers from trace config.
+  class BASE_EXPORT PeriodicGlobalDumpTimer {
+   public:
+    PeriodicGlobalDumpTimer();
+    ~PeriodicGlobalDumpTimer();
+
+    void Start(const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
+                   triggers_list);
+    void Stop();
+
+    bool IsRunning();
+
+   private:
+    // Periodically called by the timer.
+    void RequestPeriodicGlobalDump();
+
+    RepeatingTimer timer_;
+    uint32_t periodic_dumps_count_;
+    uint32_t light_dump_rate_;
+    uint32_t heavy_dump_rate_;
+
+    DISALLOW_COPY_AND_ASSIGN(PeriodicGlobalDumpTimer);
+  };
+
   static const int kMaxConsecutiveFailuresCount;
   static const char* const kSystemAllocatorPoolName;
 
@@ -262,7 +306,10 @@
 
   static void SetInstanceForTesting(MemoryDumpManager* instance);
   static void FinalizeDumpAndAddToTrace(
-      scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+      std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+
+  // Enable heap profiling if kEnableHeapProfiling is specified.
+  void EnableHeapProfilingIfNeeded();
 
   // Internal, used only by MemoryDumpManagerDelegate.
   // Creates a memory dump for the current process and appends it to the trace.
@@ -271,17 +318,30 @@
   void CreateProcessDump(const MemoryDumpRequestArgs& args,
                          const MemoryDumpCallback& callback);
 
-  // Continues the ProcessMemoryDump started by CreateProcessDump(), hopping
-  // across threads as needed as specified by MDPs in RegisterDumpProvider().
-  void ContinueAsyncProcessDump(
-      ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+  // Calls InvokeOnMemoryDump() for the next MDP on the task runner specified
+  // by the MDP during registration. On failure to do so, skips and continues
+  // to the next MDP.
+  void SetupNextMemoryDump(
+      std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+
+  // Invokes OnMemoryDump() of the next MDP and calls SetupNextMemoryDump() at
+  // the end to continue the ProcessMemoryDump. Should be called on the MDP task
+  // runner.
+  void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+
+  // Helper for the RegisterDumpProvider* functions.
+  void RegisterDumpProviderInternal(
+      MemoryDumpProvider* mdp,
+      const char* name,
+      scoped_refptr<SequencedTaskRunner> task_runner,
+      const MemoryDumpProvider::Options& options);
 
   // Helper for the public UnregisterDumpProvider* functions.
   void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
                                       bool take_mdp_ownership_and_delete_async);
 
-  // An ordererd set of registered MemoryDumpProviderInfo(s), sorted by thread
-  // affinity (MDPs belonging to the same thread are adjacent).
+  // An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
+  // runner affinity (MDPs belonging to the same task runner are adjacent).
   MemoryDumpProviderInfo::OrderedSet dump_providers_;
 
   // Shared among all the PMDs to keep state scoped to the tracing session.
@@ -301,10 +361,11 @@
   subtle::AtomicWord memory_tracing_enabled_;
 
   // For time-triggered periodic dumps.
-  RepeatingTimer periodic_dump_timer_;
+  PeriodicGlobalDumpTimer periodic_dump_timer_;
 
-  // Thread used for MemoryDumpProviders which don't specify a thread affinity.
-  scoped_ptr<Thread> dump_thread_;
+  // Thread used for MemoryDumpProviders which don't specify a task runner
+  // affinity.
+  std::unique_ptr<Thread> dump_thread_;
 
   // The unique id of the child process. This is created only for tracing and is
   // expected to be valid only when tracing is enabled.
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 03b3afa..d14093c 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -6,20 +6,24 @@
 
 #include <stdint.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/bind_helpers.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
 #include "base/message_loop/message_loop.h"
 #include "base/run_loop.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/test/test_io_thread.h"
 #include "base/test/trace_event_analyzer.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_worker_pool.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_buffer.h"
 #include "base/trace_event/trace_config_memory_test_util.h"
@@ -45,15 +49,24 @@
   return arg.level_of_detail == MemoryDumpLevelOfDetail::LIGHT;
 }
 
+MATCHER(IsBackgroundDump, "") {
+  return arg.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND;
+}
+
 namespace {
 
+const char* kMDPName = "TestDumpProvider";
+const char* kWhitelistedMDPName = "WhitelistedTestDumpProvider";
+const char* const kTestMDPWhitelist[] = {kWhitelistedMDPName, nullptr};
+
 void RegisterDumpProvider(
     MemoryDumpProvider* mdp,
-    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
-    const MemoryDumpProvider::Options& options) {
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options,
+    const char* name = kMDPName) {
   MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
   mdm->set_dumper_registrations_ignored_for_testing(false);
-  mdm->RegisterDumpProvider(mdp, "TestDumpProvider", task_runner, options);
+  mdm->RegisterDumpProvider(mdp, name, std::move(task_runner), options);
   mdm->set_dumper_registrations_ignored_for_testing(true);
 }
 
@@ -61,6 +74,17 @@
   RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
 }
 
+void RegisterDumpProviderWithSequencedTaskRunner(
+    MemoryDumpProvider* mdp,
+    scoped_refptr<base::SequencedTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options) {
+  MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
+  mdm->set_dumper_registrations_ignored_for_testing(false);
+  mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
+                                                   options);
+  mdm->set_dumper_registrations_ignored_for_testing(true);
+}
+
 void OnTraceDataCollected(Closure quit_closure,
                           trace_event::TraceResultBuffer* buffer,
                           const scoped_refptr<RefCountedString>& json,
@@ -90,6 +114,9 @@
     NOTREACHED();
     return MemoryDumpManager::kInvalidTracingProcessId;
   }
+
+  // Promote the CreateProcessDump to public so it can be used by test fixtures.
+  using MemoryDumpManagerDelegate::CreateProcessDump;
 };
 
 class MockMemoryDumpProvider : public MemoryDumpProvider {
@@ -98,7 +125,17 @@
   MOCK_METHOD2(OnMemoryDump,
                bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
 
-  MockMemoryDumpProvider() : enable_mock_destructor(false) {}
+  MockMemoryDumpProvider() : enable_mock_destructor(false) {
+    ON_CALL(*this, OnMemoryDump(_, _))
+        .WillByDefault(Invoke([](const MemoryDumpArgs&,
+                                 ProcessMemoryDump* pmd) -> bool {
+          // |session_state| should not be null under any circumstances when
+          // invoking a memory dump. The problem might arise in race conditions
+          // like crbug.com/600570 .
+          EXPECT_TRUE(pmd->session_state().get() != nullptr);
+          return true;
+        }));
+  }
   ~MockMemoryDumpProvider() override {
     if (enable_mock_destructor)
       Destructor();
@@ -107,6 +144,46 @@
   bool enable_mock_destructor;
 };
 
+class TestSequencedTaskRunner : public SequencedTaskRunner {
+ public:
+  TestSequencedTaskRunner()
+      : worker_pool_(
+            new SequencedWorkerPool(2 /* max_threads */, "Test Task Runner")),
+        enabled_(true),
+        num_of_post_tasks_(0) {}
+
+  void set_enabled(bool value) { enabled_ = value; }
+  unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
+
+  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+                                  const Closure& task,
+                                  TimeDelta delay) override {
+    NOTREACHED();
+    return false;
+  }
+
+  bool PostDelayedTask(const tracked_objects::Location& from_here,
+                       const Closure& task,
+                       TimeDelta delay) override {
+    num_of_post_tasks_++;
+    if (enabled_)
+      return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
+    return false;
+  }
+
+  bool RunsTasksOnCurrentThread() const override {
+    return worker_pool_->IsRunningSequenceOnCurrentThread(token_);
+  }
+
+ private:
+  ~TestSequencedTaskRunner() override {}
+
+  scoped_refptr<SequencedWorkerPool> worker_pool_;
+  const SequencedWorkerPool::SequenceToken token_;
+  bool enabled_;
+  unsigned num_of_post_tasks_;
+};
+
 class MemoryDumpManagerTest : public testing::Test {
  public:
   MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
@@ -149,7 +226,7 @@
     RunLoop run_loop;
     MemoryDumpCallback callback =
         Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
-             MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+             ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
     mdm_->RequestGlobalDump(dump_type, level_of_detail, callback);
     run_loop.Run();
   }
@@ -175,12 +252,12 @@
   }
 
   const MemoryDumpProvider::Options kDefaultOptions;
-  scoped_ptr<MemoryDumpManager> mdm_;
-  scoped_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
+  std::unique_ptr<MemoryDumpManager> mdm_;
+  std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
   bool last_callback_success_;
 
  private:
-  scoped_ptr<MessageLoop> message_loop_;
+  std::unique_ptr<MessageLoop> message_loop_;
 
   // We want our singleton torn down after each test.
   ShadowingAtExitManager at_exit_manager_;
@@ -262,7 +339,8 @@
   RegisterDumpProvider(&mdp2);
 
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
-  const MemoryDumpSessionState* session_state = mdm_->session_state().get();
+  const MemoryDumpSessionState* session_state =
+      mdm_->session_state_for_testing().get();
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
   EXPECT_CALL(mdp1, OnMemoryDump(_, _))
       .Times(2)
@@ -387,19 +465,19 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
   const uint32_t kNumInitialThreads = 8;
 
-  std::vector<scoped_ptr<Thread>> threads;
-  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;
+  std::vector<std::unique_ptr<Thread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
 
   // Create the threads and setup the expectations. Given that at each iteration
   // we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be
   // invoked a number of times equal to its index.
   for (uint32_t i = kNumInitialThreads; i > 0; --i) {
-    threads.push_back(make_scoped_ptr(new Thread("test thread")));
-    auto thread = threads.back().get();
+    threads.push_back(WrapUnique(new Thread("test thread")));
+    auto* thread = threads.back().get();
     thread->Start();
     scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
-    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
-    auto mdp = mdps.back().get();
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
+    auto* mdp = mdps.back().get();
     RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
     EXPECT_CALL(*mdp, OnMemoryDump(_, _))
         .Times(i)
@@ -438,6 +516,50 @@
   DisableTracing();
 }
 
+// Check that the memory dump calls are always posted on task runner for
+// SequencedTaskRunner case and that the dump provider gets disabled when
+// PostTask fails, but the dump still succeeds.
+TEST_F(MemoryDumpManagerTest, PostTaskForSequencedTaskRunner) {
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+  std::vector<MockMemoryDumpProvider> mdps(3);
+  scoped_refptr<TestSequencedTaskRunner> task_runner1(
+      make_scoped_refptr(new TestSequencedTaskRunner()));
+  scoped_refptr<TestSequencedTaskRunner> task_runner2(
+      make_scoped_refptr(new TestSequencedTaskRunner()));
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[0], task_runner1,
+                                              kDefaultOptions);
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[1], task_runner2,
+                                              kDefaultOptions);
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[2], task_runner2,
+                                              kDefaultOptions);
+  // |mdps[0]| should be disabled permanently after first dump.
+  EXPECT_CALL(mdps[0], OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(mdps[1], OnMemoryDump(_, _)).Times(2);
+  EXPECT_CALL(mdps[2], OnMemoryDump(_, _)).Times(2);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
+
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+  task_runner1->set_enabled(false);
+  last_callback_success_ = false;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  // Tasks should be individually posted even if |mdps[1]| and |mdps[2]| belong
+  // to same task runner.
+  EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
+  EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
+  EXPECT_TRUE(last_callback_success_);
+
+  task_runner1->set_enabled(true);
+  last_callback_success_ = false;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
+  EXPECT_EQ(4u, task_runner2->no_of_post_tasks());
+  EXPECT_TRUE(last_callback_success_);
+  DisableTracing();
+}
+
 // Checks that providers get disabled after 3 consecutive failures, but not
 // otherwise (e.g., if interleaved).
 TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
@@ -548,13 +670,13 @@
 // dumping from a different thread than the dumping thread.
 TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
-  std::vector<scoped_ptr<TestIOThread>> threads;
-  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;
+  std::vector<std::unique_ptr<TestIOThread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
 
   for (int i = 0; i < 2; i++) {
     threads.push_back(
-        make_scoped_ptr(new TestIOThread(TestIOThread::kAutoStart)));
-    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
+        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
     RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                          kDefaultOptions);
   }
@@ -563,7 +685,7 @@
 
   // When OnMemoryDump is called on either of the dump providers, it will
   // unregister the other one.
-  for (const scoped_ptr<MockMemoryDumpProvider>& mdp : mdps) {
+  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
     int other_idx = (mdps.front() == mdp);
     TestIOThread* other_thread = threads[other_idx].get();
     MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
@@ -598,13 +720,13 @@
 // its dump provider should be skipped but the dump itself should succeed.
 TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
-  std::vector<scoped_ptr<TestIOThread>> threads;
-  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;
+  std::vector<std::unique_ptr<TestIOThread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
 
   for (int i = 0; i < 2; i++) {
     threads.push_back(
-        make_scoped_ptr(new TestIOThread(TestIOThread::kAutoStart)));
-    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
+        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
     RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                          kDefaultOptions);
   }
@@ -613,7 +735,7 @@
 
   // When OnMemoryDump is called on either of the dump providers, it will
   // tear down the thread of the other one.
-  for (const scoped_ptr<MockMemoryDumpProvider>& mdp : mdps) {
+  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
     int other_idx = (mdps.front() == mdp);
     TestIOThread* other_thread = threads[other_idx].get();
     auto on_dump = [other_thread, &on_memory_dump_call_count](
@@ -782,11 +904,13 @@
 // Tests against race conditions that might arise when disabling tracing in the
 // middle of a global memory dump.
 TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
-  base::WaitableEvent tracing_disabled_event(false, false);
+  base::WaitableEvent tracing_disabled_event(
+      WaitableEvent::ResetPolicy::AUTOMATIC,
+      WaitableEvent::InitialState::NOT_SIGNALED);
   InitializeMemoryDumpManager(false /* is_coordinator */);
 
   // Register a bound dump provider.
-  scoped_ptr<Thread> mdp_thread(new Thread("test thread"));
+  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
   mdp_thread->Start();
   MockMemoryDumpProvider mdp_with_affinity;
   RegisterDumpProvider(&mdp_with_affinity, mdp_thread->task_runner(),
@@ -819,16 +943,50 @@
   RunLoop run_loop;
   MemoryDumpCallback callback =
       Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
-           MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+           ThreadTaskRunnerHandle::Get(), run_loop.QuitClosure());
   mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
                           MemoryDumpLevelOfDetail::DETAILED, callback);
   DisableTracing();
   tracing_disabled_event.Signal();
   run_loop.Run();
 
-  // RequestGlobalMemoryDump() should still suceed even if some threads were
-  // torn down during the dump.
-  EXPECT_TRUE(last_callback_success_);
+  EXPECT_FALSE(last_callback_success_);
+}
+
+// Tests against race conditions that can happen if tracing is disabled before
+// the CreateProcessDump() call. Real-world regression: crbug.com/580295 .
+TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
+  base::WaitableEvent tracing_disabled_event(
+      WaitableEvent::ResetPolicy::AUTOMATIC,
+      WaitableEvent::InitialState::NOT_SIGNALED);
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+
+  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
+  mdp_thread->Start();
+
+  // Create both same-thread MDP and another MDP with dedicated thread
+  MockMemoryDumpProvider mdp1;
+  RegisterDumpProvider(&mdp1);
+  MockMemoryDumpProvider mdp2;
+  RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
+      .WillOnce(Invoke([this](const MemoryDumpRequestArgs& args,
+                              const MemoryDumpCallback& callback) {
+        DisableTracing();
+        delegate_->CreateProcessDump(args, callback);
+      }));
+
+  // If tracing is disabled for current session CreateProcessDump() should NOT
+  // request dumps from providers. Real-world regression: crbug.com/600570 .
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
+
+  last_callback_success_ = true;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  EXPECT_FALSE(last_callback_success_);
 }
 
 TEST_F(MemoryDumpManagerTest, DumpOnBehalfOfOtherProcess) {
@@ -872,7 +1030,7 @@
   buffer.Finish();
 
   // Analyze the JSON.
-  scoped_ptr<trace_analyzer::TraceAnalyzer> analyzer = make_scoped_ptr(
+  std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer = WrapUnique(
       trace_analyzer::TraceAnalyzer::Create(trace_output.json_output));
   trace_analyzer::TraceEventVector events;
   analyzer->FindEvents(Query::EventPhaseIs(TRACE_EVENT_PHASE_MEMORY_DUMP),
@@ -893,9 +1051,9 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
   static const int kNumProviders = 3;
   int dtor_count = 0;
-  std::vector<scoped_ptr<MemoryDumpProvider>> mdps;
+  std::vector<std::unique_ptr<MemoryDumpProvider>> mdps;
   for (int i = 0; i < kNumProviders; ++i) {
-    scoped_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
+    std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
     mdp->enable_mock_destructor = true;
     EXPECT_CALL(*mdp, Destructor())
         .WillOnce(Invoke([&dtor_count]() { dtor_count++; }));
@@ -918,7 +1076,7 @@
 // happen on the same thread (the MemoryDumpManager utility thread).
 TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
-  scoped_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
+  std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
   mdp->enable_mock_destructor = true;
   RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
 
@@ -932,7 +1090,7 @@
         base::Bind(
             &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
             base::Unretained(MemoryDumpManager::GetInstance()),
-            base::Passed(scoped_ptr<MemoryDumpProvider>(std::move(mdp)))));
+            base::Passed(std::unique_ptr<MemoryDumpProvider>(std::move(mdp)))));
     thread_for_unregistration.Stop();
     return true;
   };
@@ -954,5 +1112,60 @@
   DisableTracing();
 }
 
+TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+  SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
+  std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
+  RegisterDumpProvider(mdp1.get());
+  std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
+  RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
+                       kWhitelistedMDPName);
+
+  EXPECT_CALL(*mdp1, OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(*mdp2, OnMemoryDump(_, _)).Times(1).WillOnce(Return(true));
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+  EXPECT_FALSE(IsPeriodicDumpingEnabled());
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::BACKGROUND);
+  DisableTracing();
+}
+
+TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
+  InitializeMemoryDumpManager(true /* is_coordinator */);
+
+  RunLoop run_loop;
+  auto quit_closure = run_loop.QuitClosure();
+
+  testing::InSequence sequence;
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+      .Times(5);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(IsBackgroundDump(), _))
+      .WillOnce(Invoke([quit_closure](const MemoryDumpRequestArgs& args,
+                                      const MemoryDumpCallback& callback) {
+        ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, quit_closure);
+      }));
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(AnyNumber());
+
+  EnableTracingWithTraceConfig(
+      TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+          1 /* period_ms */));
+
+  // Only background mode dumps should be allowed with the trace config.
+  last_callback_success_ = false;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::LIGHT);
+  EXPECT_FALSE(last_callback_success_);
+  last_callback_success_ = false;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  EXPECT_FALSE(last_callback_success_);
+
+  ASSERT_TRUE(IsPeriodicDumpingEnabled());
+  run_loop.Run();
+  DisableTracing();
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index 2ce919d..2c50286 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -15,25 +15,25 @@
 
 class ProcessMemoryDump;
 
-// Args passed to OnMemoryDump(). This is to avoid rewriting all the subclasses
-// in the codebase when extending the MemoryDumpProvider API.
-struct MemoryDumpArgs {
-  MemoryDumpLevelOfDetail level_of_detail;
-};
-
 // The contract interface that memory dump providers must implement.
 class BASE_EXPORT MemoryDumpProvider {
  public:
   // Optional arguments for MemoryDumpManager::RegisterDumpProvider().
   struct Options {
-    Options() : target_pid(kNullProcessId) {}
-    explicit Options(ProcessId target_pid) : target_pid(target_pid) {}
+    Options()
+        : target_pid(kNullProcessId),
+          dumps_on_single_thread_task_runner(false) {}
 
     // If the dump provider generates dumps on behalf of another process,
-    // |target_process| contains the pid of that process.
+    // |target_pid| contains the pid of that process.
     // The default value is kNullProcessId, which means that the dump provider
     // generates dumps for the current process.
     ProcessId target_pid;
+
+    // |dumps_on_single_thread_task_runner| is true if the dump provider runs on
+    // a SingleThreadTaskRunner, which is usually the case. It is faster to run
+    // all providers that run on the same thread together without thread hops.
+    bool dumps_on_single_thread_task_runner;
   };
 
   virtual ~MemoryDumpProvider() {}
@@ -50,7 +50,7 @@
 
   // Called by the MemoryDumpManager when an allocator should start or stop
   // collecting extensive allocation data, if supported.
-  virtual void OnHeapProfilingEnabled(bool /* enabled */) {}
+  virtual void OnHeapProfilingEnabled(bool) {}
 
  protected:
   MemoryDumpProvider() {}
diff --git a/base/trace_event/memory_dump_request_args.cc b/base/trace_event/memory_dump_request_args.cc
index 48b5ba6..e6c5b87 100644
--- a/base/trace_event/memory_dump_request_args.cc
+++ b/base/trace_event/memory_dump_request_args.cc
@@ -28,6 +28,8 @@
 const char* MemoryDumpLevelOfDetailToString(
     const MemoryDumpLevelOfDetail& level_of_detail) {
   switch (level_of_detail) {
+    case MemoryDumpLevelOfDetail::BACKGROUND:
+      return "background";
     case MemoryDumpLevelOfDetail::LIGHT:
       return "light";
     case MemoryDumpLevelOfDetail::DETAILED:
@@ -39,6 +41,8 @@
 
 MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
     const std::string& str) {
+  if (str == "background")
+    return MemoryDumpLevelOfDetail::BACKGROUND;
   if (str == "light")
     return MemoryDumpLevelOfDetail::LIGHT;
   if (str == "detailed")
diff --git a/base/trace_event/memory_dump_request_args.h b/base/trace_event/memory_dump_request_args.h
index 00d560e..f3ff9d8 100644
--- a/base/trace_event/memory_dump_request_args.h
+++ b/base/trace_event/memory_dump_request_args.h
@@ -28,13 +28,25 @@
 };
 
 // Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
-// MemoryDumpProvider instances must guarantee that level of detail does not
-// affect the total size reported in the root node, but only the granularity of
-// the child MemoryAllocatorDump(s).
-enum class MemoryDumpLevelOfDetail {
-  LIGHT,           // Few entries, typically a fixed number, per dump.
-  DETAILED,        // Unrestricted amount of entries per dump.
-  LAST = DETAILED  // For IPC Macros.
+enum class MemoryDumpLevelOfDetail : uint32_t {
+  FIRST,
+
+  // For background tracing mode. The dump time is quick, and typically just the
+  // totals are expected. Suballocations need not be specified. Dump name must
+  // contain only pre-defined strings and string arguments cannot be added.
+  BACKGROUND = FIRST,
+
+  // For the levels below, MemoryDumpProvider instances must guarantee that the
+  // total size reported in the root node is consistent. Only the granularity of
+  // the child MemoryAllocatorDump(s) differs with the levels.
+
+  // Few entries, typically a fixed number, per dump.
+  LIGHT,
+
+  // Unrestricted amount of entries per dump.
+  DETAILED,
+
+  LAST = DETAILED
 };
 
 // Initial request arguments for a global memory dump. (see
@@ -49,6 +61,13 @@
   MemoryDumpLevelOfDetail level_of_detail;
 };
 
+// Args for ProcessMemoryDump and passed to OnMemoryDump calls for memory dump
+// providers. Dump providers are expected to read the args for creating dumps.
+struct MemoryDumpArgs {
+  // Specifies how detailed the dumps should be.
+  MemoryDumpLevelOfDetail level_of_detail;
+};
+
 using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
 
 BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
diff --git a/base/trace_event/memory_dump_session_state.cc b/base/trace_event/memory_dump_session_state.cc
index 5aa79b1..b3d9a8c 100644
--- a/base/trace_event/memory_dump_session_state.cc
+++ b/base/trace_event/memory_dump_session_state.cc
@@ -7,13 +7,25 @@
 namespace base {
 namespace trace_event {
 
-MemoryDumpSessionState::MemoryDumpSessionState(
-    const scoped_refptr<StackFrameDeduplicator>& stack_frame_deduplicator,
-    const scoped_refptr<TypeNameDeduplicator>& type_name_deduplicator)
-    : stack_frame_deduplicator_(stack_frame_deduplicator),
-      type_name_deduplicator_(type_name_deduplicator) {}
+MemoryDumpSessionState::MemoryDumpSessionState() {}
 
-MemoryDumpSessionState::~MemoryDumpSessionState() {
+MemoryDumpSessionState::~MemoryDumpSessionState() {}
+
+void MemoryDumpSessionState::SetStackFrameDeduplicator(
+    std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator) {
+  DCHECK(!stack_frame_deduplicator_);
+  stack_frame_deduplicator_ = std::move(stack_frame_deduplicator);
+}
+
+void MemoryDumpSessionState::SetTypeNameDeduplicator(
+    std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator) {
+  DCHECK(!type_name_deduplicator_);
+  type_name_deduplicator_ = std::move(type_name_deduplicator);
+}
+
+void MemoryDumpSessionState::SetMemoryDumpConfig(
+    const TraceConfig::MemoryDumpConfig& config) {
+  memory_dump_config_ = config;
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/memory_dump_session_state.h b/base/trace_event/memory_dump_session_state.h
index 6834471..f199ec1 100644
--- a/base/trace_event/memory_dump_session_state.h
+++ b/base/trace_event/memory_dump_session_state.h
@@ -5,10 +5,12 @@
 #ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
 #define BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
 
+#include <memory>
+
 #include "base/base_export.h"
-#include "base/memory/ref_counted.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/trace_config.h"
 
 namespace base {
 namespace trace_event {
@@ -18,33 +20,47 @@
 class BASE_EXPORT MemoryDumpSessionState
     : public RefCountedThreadSafe<MemoryDumpSessionState> {
  public:
-  MemoryDumpSessionState(
-      const scoped_refptr<StackFrameDeduplicator>& stack_frame_deduplicator,
-      const scoped_refptr<TypeNameDeduplicator>& type_name_deduplicator);
+  MemoryDumpSessionState();
 
   // Returns the stack frame deduplicator that should be used by memory dump
   // providers when doing a heap dump.
-  StackFrameDeduplicator* stack_frame_deduplicator() {
+  StackFrameDeduplicator* stack_frame_deduplicator() const {
     return stack_frame_deduplicator_.get();
   }
 
+  void SetStackFrameDeduplicator(
+      std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator);
+
   // Returns the type name deduplicator that should be used by memory dump
   // providers when doing a heap dump.
-  TypeNameDeduplicator* type_name_deduplicator() {
+  TypeNameDeduplicator* type_name_deduplicator() const {
     return type_name_deduplicator_.get();
   }
 
+  void SetTypeNameDeduplicator(
+      std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator);
+
+  const TraceConfig::MemoryDumpConfig& memory_dump_config() const {
+    return memory_dump_config_;
+  }
+
+  void SetMemoryDumpConfig(const TraceConfig::MemoryDumpConfig& config);
+
  private:
   friend class RefCountedThreadSafe<MemoryDumpSessionState>;
   ~MemoryDumpSessionState();
 
   // Deduplicates backtraces in heap dumps so they can be written once when the
   // trace is finalized.
-  scoped_refptr<StackFrameDeduplicator> stack_frame_deduplicator_;
+  std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator_;
 
   // Deduplicates type names in heap dumps so they can be written once when the
   // trace is finalized.
-  scoped_refptr<TypeNameDeduplicator> type_name_deduplicator_;
+  std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator_;
+
+  // The memory dump config, copied at the time when the tracing session was
+  // started.
+  TraceConfig::MemoryDumpConfig memory_dump_config_;
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/memory_infra_background_whitelist.cc b/base/trace_event/memory_infra_background_whitelist.cc
new file mode 100644
index 0000000..aed187f
--- /dev/null
+++ b/base/trace_event/memory_infra_background_whitelist.cc
@@ -0,0 +1,131 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_infra_background_whitelist.h"
+
+#include <ctype.h>
+#include <string.h>
+
+#include <string>
+
+namespace base {
+namespace trace_event {
+namespace {
+
+// The names of dump providers whitelisted for background tracing. Dump
+// providers can be added here only if the background mode dump has very
+// low performance and memory overhead.
+const char* const kDumpProviderWhitelist[] = {
+    "BlinkGC",
+    "ChildDiscardableSharedMemoryManager",
+    "DOMStorage",
+    "HostDiscardableSharedMemoryManager",
+    "IndexedDBBackingStore",
+    "JavaHeap",
+    "LeveldbValueStore",
+    "Malloc",
+    "PartitionAlloc",
+    "ProcessMemoryMetrics",
+    "Skia",
+    "Sql",
+    "V8Isolate",
+    "WinHeap",
+    nullptr  // End of list marker.
+};
+
+// A list of string names that are allowed for the memory allocator dumps in
+// background mode.
+const char* const kAllocatorDumpNameWhitelist[] = {
+    "blink_gc",
+    "blink_gc/allocated_objects",
+    "discardable",
+    "discardable/child_0x?",
+    "dom_storage/0x?/cache_size",
+    "dom_storage/session_storage_0x?",
+    "java_heap",
+    "java_heap/allocated_objects",
+    "leveldb/index_db/0x?",
+    "leveldb/value_store/Extensions.Database.Open.Settings/0x?",
+    "leveldb/value_store/Extensions.Database.Open.Rules/0x?",
+    "leveldb/value_store/Extensions.Database.Open.State/0x?",
+    "leveldb/value_store/Extensions.Database.Open/0x?",
+    "leveldb/value_store/Extensions.Database.Restore/0x?",
+    "leveldb/value_store/Extensions.Database.Value.Restore/0x?",
+    "malloc",
+    "malloc/allocated_objects",
+    "malloc/metadata_fragmentation_caches",
+    "partition_alloc/allocated_objects",
+    "partition_alloc/partitions",
+    "partition_alloc/partitions/buffer",
+    "partition_alloc/partitions/fast_malloc",
+    "partition_alloc/partitions/layout",
+    "skia/sk_glyph_cache",
+    "skia/sk_resource_cache",
+    "sqlite",
+    "v8/isolate_0x?/heap_spaces",
+    "v8/isolate_0x?/heap_spaces/code_space",
+    "v8/isolate_0x?/heap_spaces/large_object_space",
+    "v8/isolate_0x?/heap_spaces/map_space",
+    "v8/isolate_0x?/heap_spaces/new_space",
+    "v8/isolate_0x?/heap_spaces/old_space",
+    "v8/isolate_0x?/heap_spaces/other_spaces",
+    "v8/isolate_0x?/malloc",
+    "v8/isolate_0x?/zapped_for_debug",
+    "winheap",
+    "winheap/allocated_objects",
+    nullptr  // End of list marker.
+};
+
+const char* const* g_dump_provider_whitelist = kDumpProviderWhitelist;
+const char* const* g_allocator_dump_name_whitelist =
+    kAllocatorDumpNameWhitelist;
+
+}  // namespace
+
+bool IsMemoryDumpProviderWhitelisted(const char* mdp_name) {
+  for (size_t i = 0; g_dump_provider_whitelist[i] != nullptr; ++i) {
+    if (strcmp(mdp_name, g_dump_provider_whitelist[i]) == 0)
+      return true;
+  }
+  return false;
+}
+
+bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
+  // Remove special characters and numbers (including hexadecimal numbers,
+  // which are marked by '0x') from the given string.
+  const size_t length = name.size();
+  std::string stripped_str;
+  stripped_str.reserve(length);
+  bool parsing_hex = false;
+  for (size_t i = 0; i < length; ++i) {
+    if (parsing_hex && isxdigit(name[i]))
+      continue;
+    parsing_hex = false;
+    if (i + 1 < length && name[i] == '0' && name[i + 1] == 'x') {
+      parsing_hex = true;
+      stripped_str.append("0x?");
+      ++i;
+    } else {
+      stripped_str.push_back(name[i]);
+    }
+  }
+
+  for (size_t i = 0; g_allocator_dump_name_whitelist[i] != nullptr; ++i) {
+    if (stripped_str == g_allocator_dump_name_whitelist[i]) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void SetDumpProviderWhitelistForTesting(const char* const* list) {
+  g_dump_provider_whitelist = list;
+}
+
+void SetAllocatorDumpNameWhitelistForTesting(const char* const* list) {
+  g_allocator_dump_name_whitelist = list;
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/memory_infra_background_whitelist.h b/base/trace_event/memory_infra_background_whitelist.h
new file mode 100644
index 0000000..b8d704a
--- /dev/null
+++ b/base/trace_event/memory_infra_background_whitelist.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+#define BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
+
+// This file contains the whitelists for background mode to limit the tracing
+// overhead and remove sensitive information from traces.
+
+#include <string>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace trace_event {
+
+// Checks if the given |mdp_name| is in the whitelist.
+bool BASE_EXPORT IsMemoryDumpProviderWhitelisted(const char* mdp_name);
+
+// Checks if the given |name| matches any of the whitelisted patterns.
+bool BASE_EXPORT IsMemoryAllocatorDumpNameWhitelisted(const std::string& name);
+
+// The whitelist is replaced with the given list for tests. The last element of
+// the list must be nullptr.
+void BASE_EXPORT SetDumpProviderWhitelistForTesting(const char* const* list);
+void BASE_EXPORT
+SetAllocatorDumpNameWhitelistForTesting(const char* const* list);
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_WHITELIST_H_
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index ae60bb0..8269892 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -5,17 +5,30 @@
 #include "base/trace_event/process_memory_dump.h"
 
 #include <errno.h>
+
 #include <vector>
 
+#include "base/memory/ptr_util.h"
 #include "base/process/process_metrics.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
 #include "base/trace_event/process_memory_totals.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
 
+#if defined(OS_IOS)
+#include <sys/sysctl.h>
+#endif
+
 #if defined(OS_POSIX)
 #include <sys/mman.h>
 #endif
 
+#if defined(OS_WIN)
+#include <Psapi.h>
+#endif
+
 namespace base {
 namespace trace_event {
 
@@ -28,101 +41,164 @@
   return "global/" + guid.ToString();
 }
 
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
+  return (mapped_size + page_size - 1) / page_size;
+}
+#endif
+
 }  // namespace
 
+// static
+bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
+
 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
 // static
+size_t ProcessMemoryDump::GetSystemPageSize() {
+#if defined(OS_IOS)
+  // On iOS, getpagesize() returns the user page sizes, but for allocating
+  // arrays for mincore(), kernel page sizes is needed. sysctlbyname() should
+  // be used for this. Refer to crbug.com/542671 and Apple rdar://23651782
+  int pagesize;
+  size_t pagesize_len;
+  int status = sysctlbyname("vm.pagesize", NULL, &pagesize_len, nullptr, 0);
+  if (!status && pagesize_len == sizeof(pagesize)) {
+    if (!sysctlbyname("vm.pagesize", &pagesize, &pagesize_len, nullptr, 0))
+      return pagesize;
+  }
+  LOG(ERROR) << "sysctlbyname(\"vm.pagesize\") failed.";
+  // Falls back to getpagesize() although it may be wrong in certain cases.
+#endif  // defined(OS_IOS)
+  return base::GetPageSize();
+}
+
+// static
 size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
                                              size_t mapped_size) {
-  const size_t page_size = GetPageSize();
+  const size_t page_size = GetSystemPageSize();
   const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
   DCHECK_EQ(0u, start_pointer % page_size);
 
-  // This function allocates a char vector of size number of pages in the given
-  // mapped_size. To avoid allocating a large array, the memory is split into
-  // chunks. Maximum size of vector allocated, will be
-  // kPageChunkSize / page_size.
-  const size_t kMaxChunkSize = 32 * 1024 * 1024;
   size_t offset = 0;
   size_t total_resident_size = 0;
-  int result = 0;
+  bool failure = false;
+
+  // An array as large as number of pages in memory segment needs to be passed
+  // to the query function. To avoid allocating a large array, the given block
+  // of memory is split into chunks of size |kMaxChunkSize|.
+  const size_t kMaxChunkSize = 8 * 1024 * 1024;
+  size_t max_vec_size =
+      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
+#if defined(OS_MACOSX) || defined(OS_IOS)
+  std::unique_ptr<char[]> vec(new char[max_vec_size]);
+#elif defined(OS_WIN)
+  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
+      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
+#elif defined(OS_POSIX)
+  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
+#endif
+
   while (offset < mapped_size) {
-    void* chunk_start = reinterpret_cast<void*>(start_pointer + offset);
+    uintptr_t chunk_start = (start_pointer + offset);
     const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
-    const size_t page_count = (chunk_size + page_size - 1) / page_size;
+    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
     size_t resident_page_count = 0;
 
 #if defined(OS_MACOSX) || defined(OS_IOS)
-    std::vector<char> vec(page_count + 1);
     // mincore in MAC does not fail with EAGAIN.
-    result = mincore(chunk_start, chunk_size, vec.data());
-    if (result)
-      break;
-
+    failure =
+        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
     for (size_t i = 0; i < page_count; i++)
       resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
-#else   // defined(OS_MACOSX) || defined(OS_IOS)
-    std::vector<unsigned char> vec(page_count + 1);
-    int error_counter = 0;
-    // HANDLE_EINTR tries for 100 times. So following the same pattern.
-    do {
-      result = mincore(chunk_start, chunk_size, vec.data());
-    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
-    if (result)
-      break;
+#elif defined(OS_WIN)
+    for (size_t i = 0; i < page_count; i++) {
+      vec[i].VirtualAddress =
+          reinterpret_cast<void*>(chunk_start + i * page_size);
+    }
+    DWORD vec_size = static_cast<DWORD>(
+        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
+    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);
 
     for (size_t i = 0; i < page_count; i++)
-      resident_page_count += vec[i];
-#endif  // defined(OS_MACOSX) || defined(OS_IOS)
+      resident_page_count += vec[i].VirtualAttributes.Valid;
+#elif defined(OS_POSIX)
+    int error_counter = 0;
+    int result = 0;
+    // HANDLE_EINTR tries for 100 times. So following the same pattern.
+    do {
+      result =
+          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
+    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
+    failure = !!result;
+
+    for (size_t i = 0; i < page_count; i++)
+      resident_page_count += vec[i] & 1;
+#endif
+
+    if (failure)
+      break;
 
     total_resident_size += resident_page_count * page_size;
     offset += kMaxChunkSize;
   }
 
-  DCHECK_EQ(0, result);
-  if (result) {
+  DCHECK(!failure);
+  if (failure) {
     total_resident_size = 0;
-    LOG(ERROR) << "mincore() call failed. The resident size is invalid";
+    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
   }
   return total_resident_size;
 }
 #endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
 
 ProcessMemoryDump::ProcessMemoryDump(
-    const scoped_refptr<MemoryDumpSessionState>& session_state)
+    scoped_refptr<MemoryDumpSessionState> session_state,
+    const MemoryDumpArgs& dump_args)
     : has_process_totals_(false),
       has_process_mmaps_(false),
-      session_state_(session_state) {
-}
+      session_state_(std::move(session_state)),
+      dump_args_(dump_args) {}
 
-ProcessMemoryDump::~ProcessMemoryDump() {
-}
+ProcessMemoryDump::~ProcessMemoryDump() {}
 
 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
     const std::string& absolute_name) {
-  MemoryAllocatorDump* mad = new MemoryAllocatorDump(absolute_name, this);
-  AddAllocatorDumpInternal(mad);  // Takes ownership of |mad|.
-  return mad;
+  return AddAllocatorDumpInternal(
+      WrapUnique(new MemoryAllocatorDump(absolute_name, this)));
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
     const std::string& absolute_name,
     const MemoryAllocatorDumpGuid& guid) {
-  MemoryAllocatorDump* mad = new MemoryAllocatorDump(absolute_name, this, guid);
-  AddAllocatorDumpInternal(mad);  // Takes ownership of |mad|.
-  return mad;
+  return AddAllocatorDumpInternal(
+      WrapUnique(new MemoryAllocatorDump(absolute_name, this, guid)));
 }
 
-void ProcessMemoryDump::AddAllocatorDumpInternal(MemoryAllocatorDump* mad) {
-  DCHECK_EQ(0ul, allocator_dumps_.count(mad->absolute_name()));
-  allocator_dumps_storage_.push_back(mad);
-  allocator_dumps_[mad->absolute_name()] = mad;
+MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
+    std::unique_ptr<MemoryAllocatorDump> mad) {
+  // In background mode, return the black hole dump if an invalid dump name
+  // is given.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
+      !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
+    return GetBlackHoleMad();
+  }
+
+  auto insertion_result = allocator_dumps_.insert(
+      std::make_pair(mad->absolute_name(), std::move(mad)));
+  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
+  DCHECK(insertion_result.second) << "Duplicate name: "
+                                  << inserted_mad->absolute_name();
+  return inserted_mad;
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
     const std::string& absolute_name) const {
   auto it = allocator_dumps_.find(absolute_name);
-  return it == allocator_dumps_.end() ? nullptr : it->second;
+  if (it != allocator_dumps_.end())
+    return it->second.get();
+  if (black_hole_mad_)
+    return black_hole_mad_.get();
+  return nullptr;
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
@@ -133,12 +209,34 @@
 
 MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
     const MemoryAllocatorDumpGuid& guid) {
+  // Global dumps are disabled in background mode.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+    return GetBlackHoleMad();
+
   // A shared allocator dump can be shared within a process and the guid could
   // have been created already.
-  MemoryAllocatorDump* allocator_dump = GetSharedGlobalAllocatorDump(guid);
-  return allocator_dump ? allocator_dump
-                        : CreateAllocatorDump(
-                              GetSharedGlobalAllocatorDumpName(guid), guid);
+  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
+  if (mad) {
+    // The weak flag is cleared because this method should create a non-weak
+    // dump.
+    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
+    return mad;
+  }
+  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
+    const MemoryAllocatorDumpGuid& guid) {
+  // Global dumps are disabled in background mode.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+    return GetBlackHoleMad();
+
+  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
+  if (mad)
+    return mad;
+  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
+  return mad;
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
@@ -146,10 +244,21 @@
   return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
 }
 
-void ProcessMemoryDump::AddHeapDump(const std::string& absolute_name,
-                                    scoped_refptr<TracedValue> heap_dump) {
-  DCHECK_EQ(0ul, heap_dumps_.count(absolute_name));
-  heap_dumps_[absolute_name] = heap_dump;
+void ProcessMemoryDump::DumpHeapUsage(
+    const base::hash_map<base::trace_event::AllocationContext,
+        base::trace_event::AllocationMetrics>& metrics_by_context,
+    base::trace_event::TraceEventMemoryOverhead& overhead,
+    const char* allocator_name) {
+  if (!metrics_by_context.empty()) {
+    DCHECK_EQ(0ul, heap_dumps_.count(allocator_name));
+    std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
+        metrics_by_context, *session_state());
+    heap_dumps_[allocator_name] = std::move(heap_dump);
+  }
+
+  std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
+                                             allocator_name);
+  overhead.DumpInto(base_name.c_str(), this);
 }
 
 void ProcessMemoryDump::Clear() {
@@ -163,7 +272,6 @@
     has_process_mmaps_ = false;
   }
 
-  allocator_dumps_storage_.clear();
   allocator_dumps_.clear();
   allocator_dumps_edges_.clear();
   heap_dumps_.clear();
@@ -173,14 +281,9 @@
   DCHECK(!other->has_process_totals() && !other->has_process_mmaps());
 
   // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
-  // into this ProcessMemoryDump.
-  for (MemoryAllocatorDump* mad : other->allocator_dumps_storage_) {
-    // Check that we don't merge duplicates.
-    DCHECK_EQ(0ul, allocator_dumps_.count(mad->absolute_name()));
-    allocator_dumps_storage_.push_back(mad);
-    allocator_dumps_[mad->absolute_name()] = mad;
-  }
-  other->allocator_dumps_storage_.weak_clear();
+  // into this ProcessMemoryDump, checking for duplicates.
+  for (auto& it : other->allocator_dumps_)
+    AddAllocatorDumpInternal(std::move(it.second));
   other->allocator_dumps_.clear();
 
   // Move all the edges.
@@ -189,7 +292,10 @@
                                 other->allocator_dumps_edges_.end());
   other->allocator_dumps_edges_.clear();
 
-  heap_dumps_.insert(other->heap_dumps_.begin(), other->heap_dumps_.end());
+  for (auto& it : other->heap_dumps_) {
+    DCHECK_EQ(0ul, heap_dumps_.count(it.first));
+    heap_dumps_.insert(std::make_pair(it.first, std::move(it.second)));
+  }
   other->heap_dumps_.clear();
 }
 
@@ -206,10 +312,10 @@
     value->EndDictionary();
   }
 
-  if (allocator_dumps_storage_.size() > 0) {
+  if (allocator_dumps_.size() > 0) {
     value->BeginDictionary("allocators");
-    for (const MemoryAllocatorDump* allocator_dump : allocator_dumps_storage_)
-      allocator_dump->AsValueInto(value);
+    for (const auto& allocator_dump_it : allocator_dumps_)
+      allocator_dump_it.second->AsValueInto(value);
     value->EndDictionary();
   }
 
@@ -247,10 +353,21 @@
 
 void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
                                          const std::string& target_node_name) {
+  // Do not create new dumps for suballocations in background mode.
+  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
+    return;
+
   std::string child_mad_name = target_node_name + "/__" + source.ToString();
   MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
   AddOwnershipEdge(source, target_child_mad->guid());
 }
 
+MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
+  DCHECK(is_black_hole_non_fatal_for_testing_);
+  if (!black_hole_mad_)
+    black_hole_mad_.reset(new MemoryAllocatorDump("discarded", this));
+  return black_hole_mad_.get();
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/process_memory_dump.h b/base/trace_event/process_memory_dump.h
index 5a66402..d020c7d 100644
--- a/base/trace_event/process_memory_dump.h
+++ b/base/trace_event/process_memory_dump.h
@@ -7,16 +7,16 @@
 
 #include <stddef.h>
 
+#include <unordered_map>
 #include <vector>
 
 #include "base/base_export.h"
-#include "base/containers/hash_tables.h"
-#include "base/containers/small_map.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/scoped_vector.h"
 #include "base/trace_event/memory_allocator_dump.h"
 #include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_request_args.h"
 #include "base/trace_event/memory_dump_session_state.h"
 #include "base/trace_event/process_memory_maps.h"
 #include "base/trace_event/process_memory_totals.h"
@@ -24,16 +24,13 @@
 
 // Define COUNT_RESIDENT_BYTES_SUPPORTED if platform supports counting of the
 // resident memory.
-// TODO(crbug.com/542671): COUNT_RESIDENT_BYTES_SUPPORTED is disabled on iOS
-// as it cause memory corruption on iOS 9.0+ devices.
-#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_IOS)
+#if (defined(OS_POSIX) && !defined(OS_NACL)) || defined(OS_WIN)
 #define COUNT_RESIDENT_BYTES_SUPPORTED
 #endif
 
 namespace base {
 namespace trace_event {
 
-class ConvertableToTraceFormat;
 class MemoryDumpManager;
 class MemoryDumpSessionState;
 class TracedValue;
@@ -52,12 +49,18 @@
   // Maps allocator dumps absolute names (allocator_name/heap/subheap) to
   // MemoryAllocatorDump instances.
   using AllocatorDumpsMap =
-      SmallMap<hash_map<std::string, MemoryAllocatorDump*>>;
+      std::unordered_map<std::string, std::unique_ptr<MemoryAllocatorDump>>;
 
   using HeapDumpsMap =
-      SmallMap<hash_map<std::string, scoped_refptr<TracedValue>>>;
+      std::unordered_map<std::string, std::unique_ptr<TracedValue>>;
 
 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+  // Returns the number of bytes in a kernel memory page. Some platforms may
+  // have a different value for kernel page sizes from user page sizes. It is
+  // important to use kernel memory page sizes for resident bytes calculation.
+  // In most cases, the two are the same.
+  static size_t GetSystemPageSize();
+
   // Returns the total bytes resident for a virtual address range, with given
   // |start_address| and |mapped_size|. |mapped_size| is specified in bytes. The
   // value returned is valid only if the given range is currently mmapped by the
@@ -65,7 +68,8 @@
   static size_t CountResidentBytes(void* start_address, size_t mapped_size);
 #endif
 
-  ProcessMemoryDump(const scoped_refptr<MemoryDumpSessionState>& session_state);
+  ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state,
+                    const MemoryDumpArgs& dump_args);
   ~ProcessMemoryDump();
 
   // Creates a new MemoryAllocatorDump with the given name and returns the
@@ -98,6 +102,15 @@
   MemoryAllocatorDump* CreateSharedGlobalAllocatorDump(
       const MemoryAllocatorDumpGuid& guid);
 
+  // Creates a shared MemoryAllocatorDump as CreateSharedGlobalAllocatorDump,
+  // but with a WEAK flag. A weak dump will be discarded unless a non-weak dump
+  // is created using CreateSharedGlobalAllocatorDump by at least one process.
+  // The WEAK flag does not apply if a non-weak dump with the same GUID already
+  // exists or is created later. All owners and children of the discarded dump
+  // will also be discarded transitively.
+  MemoryAllocatorDump* CreateWeakSharedGlobalAllocatorDump(
+      const MemoryAllocatorDumpGuid& guid);
+
   // Looks up a shared MemoryAllocatorDump given its guid.
   MemoryAllocatorDump* GetSharedGlobalAllocatorDump(
       const MemoryAllocatorDumpGuid& guid) const;
@@ -105,11 +118,12 @@
   // Returns the map of the MemoryAllocatorDumps added to this dump.
   const AllocatorDumpsMap& allocator_dumps() const { return allocator_dumps_; }
 
-  // Adds a heap dump for the allocator with |absolute_name|. The |TracedValue|
-  // must have the correct format. |trace_event::HeapDumper| will generate such
-  // a value from a |trace_event::AllocationRegister|.
-  void AddHeapDump(const std::string& absolute_name,
-                   scoped_refptr<TracedValue> heap_dump);
+  // Dumps heap usage with |allocator_name|.
+  void DumpHeapUsage(const base::hash_map<base::trace_event::AllocationContext,
+                                          base::trace_event::AllocationMetrics>&
+                         metrics_by_context,
+                     base::trace_event::TraceEventMemoryOverhead& overhead,
+                     const char* allocator_name);
 
   // Adds an ownership relationship between two MemoryAllocatorDump(s) with the
   // semantics: |source| owns |target|, and has the effect of attributing
@@ -161,8 +175,17 @@
   bool has_process_mmaps() const { return has_process_mmaps_; }
   void set_has_process_mmaps() { has_process_mmaps_ = true; }
 
+  const HeapDumpsMap& heap_dumps() const { return heap_dumps_; }
+
+  const MemoryDumpArgs& dump_args() const { return dump_args_; }
+
  private:
-  void AddAllocatorDumpInternal(MemoryAllocatorDump* mad);
+  FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, BackgroundModeTest);
+
+  MemoryAllocatorDump* AddAllocatorDumpInternal(
+      std::unique_ptr<MemoryAllocatorDump> mad);
+
+  MemoryAllocatorDump* GetBlackHoleMad();
 
   ProcessMemoryTotals process_totals_;
   bool has_process_totals_;
@@ -173,15 +196,24 @@
   AllocatorDumpsMap allocator_dumps_;
   HeapDumpsMap heap_dumps_;
 
-  // ProcessMemoryDump handles the memory ownership of all its belongings.
-  ScopedVector<MemoryAllocatorDump> allocator_dumps_storage_;
-
   // State shared among all PMDs instances created in a given trace session.
   scoped_refptr<MemoryDumpSessionState> session_state_;
 
   // Keeps track of relationships between MemoryAllocatorDump(s).
   std::vector<MemoryAllocatorDumpEdge> allocator_dumps_edges_;
 
+  // Level of detail of the current dump.
+  const MemoryDumpArgs dump_args_;
+
+  // This allocator dump is returned when an invalid dump is created in
+  // background mode. The attributes of the dump are ignored and not added to
+  // the trace.
+  std::unique_ptr<MemoryAllocatorDump> black_hole_mad_;
+
+  // When set to true, the DCHECK(s) for invalid dump creations on the
+  // background mode are disabled for testing.
+  static bool is_black_hole_non_fatal_for_testing_;
+
   DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDump);
 };
 
diff --git a/base/trace_event/process_memory_dump_unittest.cc b/base/trace_event/process_memory_dump_unittest.cc
index 88984ab..571774a 100644
--- a/base/trace_event/process_memory_dump_unittest.cc
+++ b/base/trace_event/process_memory_dump_unittest.cc
@@ -7,16 +7,33 @@
 #include <stddef.h>
 
 #include "base/memory/aligned_memory.h"
+#include "base/memory/ptr_util.h"
 #include "base/process/process_metrics.h"
 #include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_infra_background_whitelist.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 namespace trace_event {
 
+namespace {
+
+const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::DETAILED};
+const char* const kTestDumpNameWhitelist[] = {
+    "Whitelisted/TestName", "Whitelisted/TestName_0x?",
+    "Whitelisted/0x?/TestName", nullptr};
+
+TracedValue* GetHeapDump(const ProcessMemoryDump& pmd, const char* name) {
+  auto it = pmd.heap_dumps().find(name);
+  return it == pmd.heap_dumps().end() ? nullptr : it->second.get();
+}
+
+}  // namespace
+
 TEST(ProcessMemoryDumpTest, Clear) {
-  scoped_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
+  std::unique_ptr<ProcessMemoryDump> pmd1(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
   pmd1->CreateAllocatorDump("mad1");
   pmd1->CreateAllocatorDump("mad2");
   ASSERT_FALSE(pmd1->allocator_dumps().empty());
@@ -30,8 +47,10 @@
   pmd1->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                          MemoryAllocatorDumpGuid(4242));
 
-  MemoryAllocatorDumpGuid shared_mad_guid(1);
-  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  MemoryAllocatorDumpGuid shared_mad_guid1(1);
+  MemoryAllocatorDumpGuid shared_mad_guid2(2);
+  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid2);
 
   pmd1->Clear();
   ASSERT_TRUE(pmd1->allocator_dumps().empty());
@@ -41,49 +60,74 @@
   ASSERT_FALSE(pmd1->has_process_totals());
   ASSERT_FALSE(pmd1->has_process_mmaps());
   ASSERT_TRUE(pmd1->process_mmaps()->vm_regions().empty());
-  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid));
+  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
 
   // Check that calling AsValueInto() doesn't cause a crash.
-  scoped_refptr<TracedValue> traced_value(new TracedValue());
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
   pmd1->AsValueInto(traced_value.get());
 
   // Check that the pmd can be reused and behaves as expected.
-  auto mad1 = pmd1->CreateAllocatorDump("mad1");
-  auto mad3 = pmd1->CreateAllocatorDump("mad3");
-  auto shared_mad = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid);
-  ASSERT_EQ(3u, pmd1->allocator_dumps().size());
+  auto* mad1 = pmd1->CreateAllocatorDump("mad1");
+  auto* mad3 = pmd1->CreateAllocatorDump("mad3");
+  auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  auto* shared_mad2 =
+      pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
+  ASSERT_EQ(4u, pmd1->allocator_dumps().size());
   ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
   ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
   ASSERT_EQ(mad3, pmd1->GetAllocatorDump("mad3"));
-  ASSERT_EQ(shared_mad, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid));
+  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad2->flags());
 
-  traced_value = new TracedValue();
+  traced_value.reset(new TracedValue);
   pmd1->AsValueInto(traced_value.get());
 
   pmd1.reset();
 }
 
 TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
-  scoped_refptr<TracedValue> traced_value(new TracedValue());
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+  hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
+  metrics_by_context[AllocationContext()] = { 1, 1 };
+  TraceEventMemoryOverhead overhead;
 
-  scoped_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
-  auto mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
-  auto mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
+  scoped_refptr<MemoryDumpSessionState> session_state =
+      new MemoryDumpSessionState;
+  session_state->SetStackFrameDeduplicator(
+      WrapUnique(new StackFrameDeduplicator));
+  session_state->SetTypeNameDeduplicator(
+      WrapUnique(new TypeNameDeduplicator));
+  std::unique_ptr<ProcessMemoryDump> pmd1(
+      new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+  auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
+  auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
   pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
+  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
+  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");
 
-  scoped_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(nullptr));
-  auto mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
-  auto mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
-  pmd1->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
+  std::unique_ptr<ProcessMemoryDump> pmd2(
+      new ProcessMemoryDump(session_state.get(), kDetailedDumpArgs));
+  auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
+  auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
+  pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
+  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
+  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");
 
-  MemoryAllocatorDumpGuid shared_mad_guid(1);
-  auto shared_mad = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  MemoryAllocatorDumpGuid shared_mad_guid1(1);
+  MemoryAllocatorDumpGuid shared_mad_guid2(2);
+  auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  auto* shared_mad2 =
+      pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
 
   pmd1->TakeAllDumpsFrom(pmd2.get());
 
   // Make sure that pmd2 is empty but still usable after it has been emptied.
   ASSERT_TRUE(pmd2->allocator_dumps().empty());
   ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
+  ASSERT_TRUE(pmd2->heap_dumps().empty());
   pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
   ASSERT_EQ(1u, pmd2->allocator_dumps().size());
   ASSERT_EQ(1u, pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
@@ -98,33 +142,41 @@
   pmd2.reset();
 
   // Now check that |pmd1| has been effectively merged.
-  ASSERT_EQ(5u, pmd1->allocator_dumps().size());
+  ASSERT_EQ(6u, pmd1->allocator_dumps().size());
   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad1"));
   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad1"));
   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
   ASSERT_EQ(2u, pmd1->allocator_dumps_edges().size());
-  ASSERT_EQ(shared_mad, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid));
+  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+  ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
+  ASSERT_EQ(4u, pmd1->heap_dumps().size());
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump1") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd1/heap_dump2") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump1") != nullptr);
+  ASSERT_TRUE(GetHeapDump(*pmd1, "pmd2/heap_dump2") != nullptr);
 
   // Check that calling AsValueInto() doesn't cause a crash.
-  traced_value = new TracedValue();
+  traced_value.reset(new TracedValue);
   pmd1->AsValueInto(traced_value.get());
 
   pmd1.reset();
 }
 
 TEST(ProcessMemoryDumpTest, Suballocations) {
-  scoped_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
   const std::string allocator_dump_name = "fakealloc/allocated_objects";
   pmd->CreateAllocatorDump(allocator_dump_name);
 
   // Create one allocation with an auto-assigned guid and mark it as a
   // suballocation of "fakealloc/allocated_objects".
-  auto pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
+  auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
   pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);
 
   // Same here, but this time create an allocation with an explicit guid.
-  auto pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
+  auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
                                             MemoryAllocatorDumpGuid(0x42));
   pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);
 
@@ -151,27 +203,97 @@
   ASSERT_TRUE(found_edge[1]);
 
   // Check that calling AsValueInto() doesn't cause a crash.
-  scoped_refptr<TracedValue> traced_value(new TracedValue());
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
   pmd->AsValueInto(traced_value.get());
 
   pmd.reset();
 }
 
+TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, kDetailedDumpArgs));
+  MemoryAllocatorDumpGuid shared_mad_guid(1);
+  auto* shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad2);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad3);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad4);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+
+  auto* shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad5);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+}
+
+TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
+  MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::BACKGROUND};
+  std::unique_ptr<ProcessMemoryDump> pmd(
+      new ProcessMemoryDump(nullptr, background_args));
+  ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
+  SetAllocatorDumpNameWhitelistForTesting(kTestDumpNameWhitelist);
+  MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad();
+
+  // Invalid dump names.
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("NotWhitelisted/TestName"));
+  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
+  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/Test"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Not/Whitelisted/TestName"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/Google"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/0x1a2Google"));
+  EXPECT_EQ(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName/__12/Google"));
+
+  // Global dumps.
+  MemoryAllocatorDumpGuid guid(1);
+  EXPECT_EQ(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
+  EXPECT_EQ(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
+  EXPECT_EQ(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));
+
+  // Suballocations.
+  pmd->AddSuballocation(guid, "malloc/allocated_objects");
+  EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
+  EXPECT_EQ(0u, pmd->allocator_dumps_.size());
+
+  // Valid dump names.
+  EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Whitelisted/TestName"));
+  EXPECT_NE(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/TestName_0xA1b2"));
+  EXPECT_NE(black_hole_mad,
+            pmd->CreateAllocatorDump("Whitelisted/0xaB/TestName"));
+
+  // GetAllocatorDump is consistent.
+  EXPECT_EQ(black_hole_mad, pmd->GetAllocatorDump("NotWhitelisted/TestName"));
+  EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Whitelisted/TestName"));
+}
+
 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
 TEST(ProcessMemoryDumpTest, CountResidentBytes) {
-  const size_t page_size = base::GetPageSize();
+  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
 
   // Allocate few page of dirty memory and check if it is resident.
   const size_t size1 = 5 * page_size;
-  scoped_ptr<char, base::AlignedFreeDeleter> memory1(
+  std::unique_ptr<char, base::AlignedFreeDeleter> memory1(
       static_cast<char*>(base::AlignedAlloc(size1, page_size)));
   memset(memory1.get(), 0, size1);
   size_t res1 = ProcessMemoryDump::CountResidentBytes(memory1.get(), size1);
   ASSERT_EQ(res1, size1);
 
-  // Allocate a large memory segment (>32Mib).
-  const size_t kVeryLargeMemorySize = 34 * 1024 * 1024;
-  scoped_ptr<char, base::AlignedFreeDeleter> memory2(
+  // Allocate a large memory segment (> 8Mib).
+  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
+  std::unique_ptr<char, base::AlignedFreeDeleter> memory2(
       static_cast<char*>(base::AlignedAlloc(kVeryLargeMemorySize, page_size)));
   memset(memory2.get(), 0, kVeryLargeMemorySize);
   size_t res2 = ProcessMemoryDump::CountResidentBytes(memory2.get(),
diff --git a/base/trace_event/process_memory_maps.cc b/base/trace_event/process_memory_maps.cc
index 31083a8..a121239 100644
--- a/base/trace_event/process_memory_maps.cc
+++ b/base/trace_event/process_memory_maps.cc
@@ -15,6 +15,7 @@
 const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsRead = 4;
 const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite = 2;
 const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsExec = 1;
+const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsMayshare = 128;
 
 ProcessMemoryMaps::VMRegion::VMRegion()
     : start_address(0),
@@ -28,6 +29,8 @@
       byte_stats_proportional_resident(0) {
 }
 
+ProcessMemoryMaps::VMRegion::VMRegion(const VMRegion& other) = default;
+
 ProcessMemoryMaps::ProcessMemoryMaps() {
 }
 
diff --git a/base/trace_event/process_memory_maps.h b/base/trace_event/process_memory_maps.h
index 3dfcc0c..6a73674 100644
--- a/base/trace_event/process_memory_maps.h
+++ b/base/trace_event/process_memory_maps.h
@@ -25,8 +25,10 @@
     static const uint32_t kProtectionFlagsRead;
     static const uint32_t kProtectionFlagsWrite;
     static const uint32_t kProtectionFlagsExec;
+    static const uint32_t kProtectionFlagsMayshare;
 
     VMRegion();
+    VMRegion(const VMRegion& other);
 
     uint64_t start_address;
     uint64_t size_in_bytes;
diff --git a/base/trace_event/process_memory_maps_dump_provider.cc b/base/trace_event/process_memory_maps_dump_provider.cc
deleted file mode 100644
index 4c3959f..0000000
--- a/base/trace_event/process_memory_maps_dump_provider.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/process_memory_maps_dump_provider.h"
-
-#include <stdint.h>
-
-#include "base/files/scoped_file.h"
-#include "base/format_macros.h"
-#include "base/logging.h"
-#include "base/strings/string_util.h"
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/process_memory_maps.h"
-
-namespace base {
-namespace trace_event {
-
-// static
-FILE* ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = nullptr;
-
-namespace {
-
-const uint32_t kMaxLineSize = 4096;
-
-bool ParseSmapsHeader(const char* header_line,
-                      ProcessMemoryMaps::VMRegion* region) {
-  // e.g., "00400000-00421000 r-xp 00000000 fc:01 1234  /foo.so\n"
-  bool res = true;  // Whether this region should be appended or skipped.
-  uint64_t end_addr = 0;
-  char protection_flags[5] = {0};
-  char mapped_file[kMaxLineSize];
-
-  if (sscanf(header_line, "%" SCNx64 "-%" SCNx64 " %4c %*s %*s %*s%4095[^\n]\n",
-             &region->start_address, &end_addr, protection_flags,
-             mapped_file) != 4)
-    return false;
-
-  if (end_addr > region->start_address) {
-    region->size_in_bytes = end_addr - region->start_address;
-  } else {
-    // This is not just paranoia, it can actually happen (See crbug.com/461237).
-    region->size_in_bytes = 0;
-    res = false;
-  }
-
-  region->protection_flags = 0;
-  if (protection_flags[0] == 'r') {
-    region->protection_flags |=
-        ProcessMemoryMaps::VMRegion::kProtectionFlagsRead;
-  }
-  if (protection_flags[1] == 'w') {
-    region->protection_flags |=
-        ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite;
-  }
-  if (protection_flags[2] == 'x') {
-    region->protection_flags |=
-        ProcessMemoryMaps::VMRegion::kProtectionFlagsExec;
-  }
-
-  region->mapped_file = mapped_file;
-  TrimWhitespaceASCII(region->mapped_file, TRIM_ALL, &region->mapped_file);
-
-  return res;
-}
-
-uint64_t ReadCounterBytes(char* counter_line) {
-  uint64_t counter_value = 0;
-  int res = sscanf(counter_line, "%*s %" SCNu64 " kB", &counter_value);
-  DCHECK_EQ(1, res);
-  return counter_value * 1024;
-}
-
-uint32_t ParseSmapsCounter(char* counter_line,
-                           ProcessMemoryMaps::VMRegion* region) {
-  // A smaps counter lines looks as follows: "RSS:  0 Kb\n"
-  uint32_t res = 1;
-  char counter_name[20];
-  int did_read = sscanf(counter_line, "%19[^\n ]", counter_name);
-  DCHECK_EQ(1, did_read);
-
-  if (strcmp(counter_name, "Pss:") == 0) {
-    region->byte_stats_proportional_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Private_Dirty:") == 0) {
-    region->byte_stats_private_dirty_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Private_Clean:") == 0) {
-    region->byte_stats_private_clean_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Shared_Dirty:") == 0) {
-    region->byte_stats_shared_dirty_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Shared_Clean:") == 0) {
-    region->byte_stats_shared_clean_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Swap:") == 0) {
-    region->byte_stats_swapped = ReadCounterBytes(counter_line);
-  } else {
-    res = 0;
-  }
-
-  return res;
-}
-
-uint32_t ReadLinuxProcSmapsFile(FILE* smaps_file, ProcessMemoryMaps* pmm) {
-  if (!smaps_file)
-    return 0;
-
-  fseek(smaps_file, 0, SEEK_SET);
-
-  char line[kMaxLineSize];
-  const uint32_t kNumExpectedCountersPerRegion = 6;
-  uint32_t counters_parsed_for_current_region = 0;
-  uint32_t num_valid_regions = 0;
-  ProcessMemoryMaps::VMRegion region;
-  bool should_add_current_region = false;
-  for (;;) {
-    line[0] = '\0';
-    if (fgets(line, kMaxLineSize, smaps_file) == nullptr)
-      break;
-    DCHECK_GT(strlen(line), 0u);
-    if (isxdigit(line[0]) && !isupper(line[0])) {
-      region = ProcessMemoryMaps::VMRegion();
-      counters_parsed_for_current_region = 0;
-      should_add_current_region = ParseSmapsHeader(line, &region);
-    } else {
-      counters_parsed_for_current_region += ParseSmapsCounter(line, &region);
-      DCHECK_LE(counters_parsed_for_current_region,
-                kNumExpectedCountersPerRegion);
-      if (counters_parsed_for_current_region == kNumExpectedCountersPerRegion) {
-        if (should_add_current_region) {
-          pmm->AddVMRegion(region);
-          ++num_valid_regions;
-          should_add_current_region = false;
-        }
-      }
-    }
-  }
-  return num_valid_regions;
-}
-
-}  // namespace
-
-// static
-ProcessMemoryMapsDumpProvider* ProcessMemoryMapsDumpProvider::GetInstance() {
-  return Singleton<ProcessMemoryMapsDumpProvider,
-                   LeakySingletonTraits<ProcessMemoryMapsDumpProvider>>::get();
-}
-
-ProcessMemoryMapsDumpProvider::ProcessMemoryMapsDumpProvider() {
-}
-
-ProcessMemoryMapsDumpProvider::~ProcessMemoryMapsDumpProvider() {
-}
-
-// Called at trace dump point time. Creates a snapshot of the memory maps for
-// the current process.
-bool ProcessMemoryMapsDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
-                                                 ProcessMemoryDump* pmd) {
-  // Snapshot of memory maps is not taken for light dump requests.
-  if (args.level_of_detail == MemoryDumpLevelOfDetail::LIGHT)
-    return true;
-
-  uint32_t res = 0;
-  if (UNLIKELY(proc_smaps_for_testing)) {
-    res = ReadLinuxProcSmapsFile(proc_smaps_for_testing, pmd->process_mmaps());
-  } else {
-    ScopedFILE smaps_file(fopen("/proc/self/smaps", "r"));
-    res = ReadLinuxProcSmapsFile(smaps_file.get(), pmd->process_mmaps());
-  }
-
-  if (res > 0) {
-    pmd->set_has_process_mmaps();
-    return true;
-  }
-  return false;
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/process_memory_maps_dump_provider.h b/base/trace_event/process_memory_maps_dump_provider.h
deleted file mode 100644
index 9d8b8b9..0000000
--- a/base/trace_event/process_memory_maps_dump_provider.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_DUMP_PROVIDER_H_
-#define BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_DUMP_PROVIDER_H_
-
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/memory/singleton.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "build/build_config.h"
-
-namespace base {
-namespace trace_event {
-
-// Dump provider which collects process-wide memory stats.
-class BASE_EXPORT ProcessMemoryMapsDumpProvider : public MemoryDumpProvider {
- public:
-  static ProcessMemoryMapsDumpProvider* GetInstance();
-
-  // MemoryDumpProvider implementation.
-  bool OnMemoryDump(const MemoryDumpArgs& args,
-                    ProcessMemoryDump* pmd) override;
-
- private:
-  friend struct DefaultSingletonTraits<ProcessMemoryMapsDumpProvider>;
-  FRIEND_TEST_ALL_PREFIXES(ProcessMemoryMapsDumpProviderTest, ParseProcSmaps);
-
-  static FILE* proc_smaps_for_testing;
-
-  ProcessMemoryMapsDumpProvider();
-  ~ProcessMemoryMapsDumpProvider() override;
-
-  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryMapsDumpProvider);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_DUMP_PROVIDER_H_
diff --git a/base/trace_event/process_memory_maps_dump_provider_unittest.cc b/base/trace_event/process_memory_maps_dump_provider_unittest.cc
deleted file mode 100644
index 624f96f..0000000
--- a/base/trace_event/process_memory_maps_dump_provider_unittest.cc
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/process_memory_maps_dump_provider.h"
-
-#include <stdint.h>
-
-#include "base/files/file_util.h"
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/process_memory_maps.h"
-#include "base/trace_event/trace_event_argument.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-const char kTestSmaps1[] =
-    "00400000-004be000 r-xp 00000000 fc:01 1234              /file/1\n"
-    "Size:                760 kB\n"
-    "Rss:                 296 kB\n"
-    "Pss:                 162 kB\n"
-    "Shared_Clean:        228 kB\n"
-    "Shared_Dirty:          0 kB\n"
-    "Private_Clean:         0 kB\n"
-    "Private_Dirty:        68 kB\n"
-    "Referenced:          296 kB\n"
-    "Anonymous:            68 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  4 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd ex mr mw me dw sd\n"
-    "ff000000-ff800000 -w-p 00001080 fc:01 0            /file/name with space\n"
-    "Size:                  0 kB\n"
-    "Rss:                 192 kB\n"
-    "Pss:                 128 kB\n"
-    "Shared_Clean:        120 kB\n"
-    "Shared_Dirty:          4 kB\n"
-    "Private_Clean:        60 kB\n"
-    "Private_Dirty:         8 kB\n"
-    "Referenced:          296 kB\n"
-    "Anonymous:             0 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  0 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd ex mr mw me dw sd";
-
-const char kTestSmaps2[] =
-    // An invalid region, with zero size and overlapping with the last one
-    // (See crbug.com/461237).
-    "7fe7ce79c000-7fe7ce79c000 ---p 00000000 00:00 0 \n"
-    "Size:                  4 kB\n"
-    "Rss:                   0 kB\n"
-    "Pss:                   0 kB\n"
-    "Shared_Clean:          0 kB\n"
-    "Shared_Dirty:          0 kB\n"
-    "Private_Clean:         0 kB\n"
-    "Private_Dirty:         0 kB\n"
-    "Referenced:            0 kB\n"
-    "Anonymous:             0 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  0 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd ex mr mw me dw sd\n"
-    // A invalid region with its range going backwards.
-    "00400000-00200000 ---p 00000000 00:00 0 \n"
-    "Size:                  4 kB\n"
-    "Rss:                   0 kB\n"
-    "Pss:                   0 kB\n"
-    "Shared_Clean:          0 kB\n"
-    "Shared_Dirty:          0 kB\n"
-    "Private_Clean:         0 kB\n"
-    "Private_Dirty:         0 kB\n"
-    "Referenced:            0 kB\n"
-    "Anonymous:             0 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  0 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd ex mr mw me dw sd\n"
-    // A good anonymous region at the end.
-    "7fe7ce79c000-7fe7ce7a8000 ---p 00000000 00:00 0 \n"
-    "Size:                 48 kB\n"
-    "Rss:                  40 kB\n"
-    "Pss:                  32 kB\n"
-    "Shared_Clean:         16 kB\n"
-    "Shared_Dirty:         12 kB\n"
-    "Private_Clean:         8 kB\n"
-    "Private_Dirty:         4 kB\n"
-    "Referenced:           40 kB\n"
-    "Anonymous:            16 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  0 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd wr mr mw me ac sd\n";
-
-void CreateAndSetSmapsFileForTesting(const char* smaps_string,
-                                     ScopedFILE& file) {
-  FilePath temp_path;
-  FILE* temp_file = CreateAndOpenTemporaryFile(&temp_path);
-  file.reset(temp_file);
-  ASSERT_TRUE(temp_file);
-
-  ASSERT_TRUE(base::WriteFileDescriptor(fileno(temp_file), smaps_string,
-                                        strlen(smaps_string)));
-}
-
-}  // namespace
-
-TEST(ProcessMemoryMapsDumpProviderTest, ParseProcSmaps) {
-  const uint32_t kProtR = ProcessMemoryMaps::VMRegion::kProtectionFlagsRead;
-  const uint32_t kProtW = ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite;
-  const uint32_t kProtX = ProcessMemoryMaps::VMRegion::kProtectionFlagsExec;
-  const MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
-
-  auto pmmdp = ProcessMemoryMapsDumpProvider::GetInstance();
-
-  // Emulate an empty /proc/self/smaps.
-  ProcessMemoryDump pmd_invalid(nullptr /* session_state */);
-  ScopedFILE empty_file(OpenFile(FilePath("/dev/null"), "r"));
-  ASSERT_TRUE(empty_file.get());
-  ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = empty_file.get();
-  pmmdp->OnMemoryDump(dump_args, &pmd_invalid);
-  ASSERT_FALSE(pmd_invalid.has_process_mmaps());
-
-  // Parse the 1st smaps file.
-  ProcessMemoryDump pmd_1(nullptr /* session_state */);
-  ScopedFILE temp_file1;
-  CreateAndSetSmapsFileForTesting(kTestSmaps1, temp_file1);
-  ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = temp_file1.get();
-  pmmdp->OnMemoryDump(dump_args, &pmd_1);
-  ASSERT_TRUE(pmd_1.has_process_mmaps());
-  const auto& regions_1 = pmd_1.process_mmaps()->vm_regions();
-  ASSERT_EQ(2UL, regions_1.size());
-
-  EXPECT_EQ(0x00400000UL, regions_1[0].start_address);
-  EXPECT_EQ(0x004be000UL - 0x00400000UL, regions_1[0].size_in_bytes);
-  EXPECT_EQ(kProtR | kProtX, regions_1[0].protection_flags);
-  EXPECT_EQ("/file/1", regions_1[0].mapped_file);
-  EXPECT_EQ(162 * 1024UL, regions_1[0].byte_stats_proportional_resident);
-  EXPECT_EQ(228 * 1024UL, regions_1[0].byte_stats_shared_clean_resident);
-  EXPECT_EQ(0UL, regions_1[0].byte_stats_shared_dirty_resident);
-  EXPECT_EQ(0UL, regions_1[0].byte_stats_private_clean_resident);
-  EXPECT_EQ(68 * 1024UL, regions_1[0].byte_stats_private_dirty_resident);
-  EXPECT_EQ(4 * 1024UL, regions_1[0].byte_stats_swapped);
-
-  EXPECT_EQ(0xff000000UL, regions_1[1].start_address);
-  EXPECT_EQ(0xff800000UL - 0xff000000UL, regions_1[1].size_in_bytes);
-  EXPECT_EQ(kProtW, regions_1[1].protection_flags);
-  EXPECT_EQ("/file/name with space", regions_1[1].mapped_file);
-  EXPECT_EQ(128 * 1024UL, regions_1[1].byte_stats_proportional_resident);
-  EXPECT_EQ(120 * 1024UL, regions_1[1].byte_stats_shared_clean_resident);
-  EXPECT_EQ(4 * 1024UL, regions_1[1].byte_stats_shared_dirty_resident);
-  EXPECT_EQ(60 * 1024UL, regions_1[1].byte_stats_private_clean_resident);
-  EXPECT_EQ(8 * 1024UL, regions_1[1].byte_stats_private_dirty_resident);
-  EXPECT_EQ(0 * 1024UL, regions_1[1].byte_stats_swapped);
-
-  // Parse the 2nd smaps file.
-  ProcessMemoryDump pmd_2(nullptr /* session_state */);
-  ScopedFILE temp_file2;
-  CreateAndSetSmapsFileForTesting(kTestSmaps2, temp_file2);
-  ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = temp_file2.get();
-  pmmdp->OnMemoryDump(dump_args, &pmd_2);
-  ASSERT_TRUE(pmd_2.has_process_mmaps());
-  const auto& regions_2 = pmd_2.process_mmaps()->vm_regions();
-  ASSERT_EQ(1UL, regions_2.size());
-  EXPECT_EQ(0x7fe7ce79c000UL, regions_2[0].start_address);
-  EXPECT_EQ(0x7fe7ce7a8000UL - 0x7fe7ce79c000UL, regions_2[0].size_in_bytes);
-  EXPECT_EQ(0U, regions_2[0].protection_flags);
-  EXPECT_EQ("", regions_2[0].mapped_file);
-  EXPECT_EQ(32 * 1024UL, regions_2[0].byte_stats_proportional_resident);
-  EXPECT_EQ(16 * 1024UL, regions_2[0].byte_stats_shared_clean_resident);
-  EXPECT_EQ(12 * 1024UL, regions_2[0].byte_stats_shared_dirty_resident);
-  EXPECT_EQ(8 * 1024UL, regions_2[0].byte_stats_private_clean_resident);
-  EXPECT_EQ(4 * 1024UL, regions_2[0].byte_stats_private_dirty_resident);
-  EXPECT_EQ(0 * 1024UL, regions_2[0].byte_stats_swapped);
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/process_memory_totals_dump_provider.cc b/base/trace_event/process_memory_totals_dump_provider.cc
deleted file mode 100644
index 917dcf0..0000000
--- a/base/trace_event/process_memory_totals_dump_provider.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/process_memory_totals_dump_provider.h"
-
-#include <stddef.h>
-
-#include "base/process/process_metrics.h"
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/process_memory_totals.h"
-#include "build/build_config.h"
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-#include <fcntl.h>
-
-#include "base/files/file_util.h"
-
-namespace {
-bool kernel_supports_rss_peak_reset = true;
-const char kClearPeakRssCommand[] = "5";
-}
-#endif
-
-namespace base {
-namespace trace_event {
-
-// static
-uint64_t ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing = 0;
-
-// static
-ProcessMemoryTotalsDumpProvider*
-ProcessMemoryTotalsDumpProvider::GetInstance() {
-  return Singleton<
-      ProcessMemoryTotalsDumpProvider,
-      LeakySingletonTraits<ProcessMemoryTotalsDumpProvider>>::get();
-}
-
-ProcessMemoryTotalsDumpProvider::ProcessMemoryTotalsDumpProvider()
-    : process_metrics_(ProcessMetrics::CreateCurrentProcessMetrics()) {}
-
-ProcessMemoryTotalsDumpProvider::~ProcessMemoryTotalsDumpProvider() {
-}
-
-// Called at trace dump point time. Creates a snapshot the memory counters for
-// the current process.
-bool ProcessMemoryTotalsDumpProvider::OnMemoryDump(
-    const MemoryDumpArgs& /* args */,
-    ProcessMemoryDump* pmd) {
-  const uint64_t rss_bytes = rss_bytes_for_testing
-                                 ? rss_bytes_for_testing
-                                 : process_metrics_->GetWorkingSetSize();
-
-  uint64_t peak_rss_bytes = 0;
-
-#if !defined(OS_IOS)
-  peak_rss_bytes = process_metrics_->GetPeakWorkingSetSize();
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-  if (kernel_supports_rss_peak_reset) {
-    // TODO(ssid): Fix crbug.com/461788 to write to the file from sandboxed
-    // processes.
-    int clear_refs_fd = open("/proc/self/clear_refs", O_WRONLY);
-    if (clear_refs_fd > 0 &&
-        WriteFileDescriptor(clear_refs_fd, kClearPeakRssCommand,
-                            sizeof(kClearPeakRssCommand))) {
-      pmd->process_totals()->set_is_peak_rss_resetable(true);
-    } else {
-      kernel_supports_rss_peak_reset = false;
-    }
-    close(clear_refs_fd);
-  }
-#elif defined(OS_MACOSX)
-  size_t private_bytes;
-  bool res = process_metrics_->GetMemoryBytes(&private_bytes,
-                                              nullptr /* shared_bytes */);
-  if (res) {
-    pmd->process_totals()->SetExtraFieldInBytes("private_bytes", private_bytes);
-  }
-#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
-#endif  // !defined(OS_IOS)
-
-  if (rss_bytes > 0) {
-    pmd->process_totals()->set_resident_set_bytes(rss_bytes);
-    pmd->process_totals()->set_peak_resident_set_bytes(peak_rss_bytes);
-    pmd->set_has_process_totals();
-    return true;
-  }
-
-  return false;
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/process_memory_totals_dump_provider.h b/base/trace_event/process_memory_totals_dump_provider.h
deleted file mode 100644
index d9573d3..0000000
--- a/base/trace_event/process_memory_totals_dump_provider.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_DUMP_PROVIDER_H_
-#define BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_DUMP_PROVIDER_H_
-
-#include <stdint.h>
-
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/singleton.h"
-#include "base/trace_event/memory_dump_provider.h"
-
-namespace base {
-
-class ProcessMetrics;
-
-namespace trace_event {
-
-// Dump provider which collects process-wide memory stats.
-class BASE_EXPORT ProcessMemoryTotalsDumpProvider : public MemoryDumpProvider {
- public:
-  static ProcessMemoryTotalsDumpProvider* GetInstance();
-
-  // MemoryDumpProvider implementation.
-  bool OnMemoryDump(const MemoryDumpArgs& args,
-                    ProcessMemoryDump* pmd) override;
-
- private:
-  friend struct DefaultSingletonTraits<ProcessMemoryTotalsDumpProvider>;
-  FRIEND_TEST_ALL_PREFIXES(ProcessMemoryTotalsDumpProviderTest, DumpRSS);
-
-  static uint64_t rss_bytes_for_testing;
-
-  ProcessMemoryTotalsDumpProvider();
-  ~ProcessMemoryTotalsDumpProvider() override;
-
-  scoped_ptr<ProcessMetrics> process_metrics_;
-
-  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryTotalsDumpProvider);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_DUMP_PROVIDER_H_
diff --git a/base/trace_event/process_memory_totals_dump_provider_unittest.cc b/base/trace_event/process_memory_totals_dump_provider_unittest.cc
deleted file mode 100644
index d3f517e..0000000
--- a/base/trace_event/process_memory_totals_dump_provider_unittest.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/process_memory_totals_dump_provider.h"
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/process_memory_totals.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-TEST(ProcessMemoryTotalsDumpProviderTest, DumpRSS) {
-  const MemoryDumpArgs high_detail_args = {MemoryDumpLevelOfDetail::DETAILED};
-  auto pmtdp = ProcessMemoryTotalsDumpProvider::GetInstance();
-  scoped_ptr<ProcessMemoryDump> pmd_before(new ProcessMemoryDump(nullptr));
-  scoped_ptr<ProcessMemoryDump> pmd_after(new ProcessMemoryDump(nullptr));
-
-  ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing = 1024;
-  pmtdp->OnMemoryDump(high_detail_args, pmd_before.get());
-
-  // Pretend that the RSS of the process increased of +1M.
-  const size_t kAllocSize = 1048576;
-  ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing += kAllocSize;
-
-  pmtdp->OnMemoryDump(high_detail_args, pmd_after.get());
-
-  ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing = 0;
-
-  ASSERT_TRUE(pmd_before->has_process_totals());
-  ASSERT_TRUE(pmd_after->has_process_totals());
-
-  const uint64_t rss_before =
-      pmd_before->process_totals()->resident_set_bytes();
-  const uint64_t rss_after = pmd_after->process_totals()->resident_set_bytes();
-
-  EXPECT_NE(0U, rss_before);
-  EXPECT_NE(0U, rss_after);
-
-  EXPECT_EQ(rss_after - rss_before, kAllocSize);
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
index 3b2069a..d40f430 100644
--- a/base/trace_event/trace_buffer.cc
+++ b/base/trace_event/trace_buffer.cc
@@ -4,11 +4,12 @@
 
 #include "base/trace_event/trace_buffer.h"
 
+#include <memory>
 #include <utility>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/trace_event_impl.h"
 
 namespace base {
@@ -30,7 +31,9 @@
       recyclable_chunks_queue_[i] = i;
   }
 
-  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    HEAP_PROFILER_SCOPED_IGNORE;
+
     // Because the number of threads is much less than the number of chunks,
     // the queue should never be empty.
     DCHECK(!QueueIsEmpty());
@@ -49,10 +52,11 @@
     else
       chunk = new TraceBufferChunk(current_chunk_seq_++);
 
-    return scoped_ptr<TraceBufferChunk>(chunk);
+    return std::unique_ptr<TraceBufferChunk>(chunk);
   }
 
-  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+  void ReturnChunk(size_t index,
+                   std::unique_ptr<TraceBufferChunk> chunk) override {
     // When this method is called, the queue should not be full because it
     // can contain all chunks including the one to be returned.
     DCHECK(!QueueIsFull());
@@ -99,19 +103,6 @@
     return NULL;
   }
 
-  scoped_ptr<TraceBuffer> CloneForIteration() const override {
-    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
-    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
-         queue_index = NextQueueIndex(queue_index)) {
-      size_t chunk_index = recyclable_chunks_queue_[queue_index];
-      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
-        continue;
-      TraceBufferChunk* chunk = chunks_[chunk_index].get();
-      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone() : NULL);
-    }
-    return std::move(cloned_buffer);
-  }
-
   void EstimateTraceMemoryOverhead(
       TraceEventMemoryOverhead* overhead) override {
     overhead->Add("TraceBufferRingBuffer", sizeof(*this));
@@ -125,43 +116,6 @@
   }
 
  private:
-  class ClonedTraceBuffer : public TraceBuffer {
-   public:
-    ClonedTraceBuffer() : current_iteration_index_(0) {}
-
-    // The only implemented method.
-    const TraceBufferChunk* NextChunk() override {
-      return current_iteration_index_ < chunks_.size()
-                 ? chunks_[current_iteration_index_++].get()
-                 : NULL;
-    }
-
-    scoped_ptr<TraceBufferChunk> GetChunk(size_t* /* index */) override {
-      NOTIMPLEMENTED();
-      return scoped_ptr<TraceBufferChunk>();
-    }
-    void ReturnChunk(size_t /*index*/, scoped_ptr<TraceBufferChunk>) override {
-      NOTIMPLEMENTED();
-    }
-    bool IsFull() const override { return false; }
-    size_t Size() const override { return 0; }
-    size_t Capacity() const override { return 0; }
-    TraceEvent* GetEventByHandle(TraceEventHandle /* handle */) override {
-      return NULL;
-    }
-    scoped_ptr<TraceBuffer> CloneForIteration() const override {
-      NOTIMPLEMENTED();
-      return scoped_ptr<TraceBuffer>();
-    }
-    void EstimateTraceMemoryOverhead(
-        TraceEventMemoryOverhead* /* overhead */) override {
-      NOTIMPLEMENTED();
-    }
-
-    size_t current_iteration_index_;
-    std::vector<scoped_ptr<TraceBufferChunk>> chunks_;
-  };
-
   bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }
 
   size_t QueueSize() const {
@@ -185,9 +139,9 @@
   }
 
   size_t max_chunks_;
-  std::vector<scoped_ptr<TraceBufferChunk>> chunks_;
+  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
 
-  scoped_ptr<size_t[]> recyclable_chunks_queue_;
+  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
   size_t queue_head_;
   size_t queue_tail_;
 
@@ -206,7 +160,9 @@
     chunks_.reserve(max_chunks_);
   }
 
-  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    HEAP_PROFILER_SCOPED_IGNORE;
+
     // This function may be called when adding normal events or indirectly from
     // AddMetadataEventsWhileLocked(). We can not DECHECK(!IsFull()) because we
     // have to add the metadata events and flush thread-local buffers even if
@@ -215,11 +171,12 @@
     chunks_.push_back(NULL);  // Put NULL in the slot of a in-flight chunk.
     ++in_flight_chunk_count_;
     // + 1 because zero chunk_seq is not allowed.
-    return scoped_ptr<TraceBufferChunk>(
+    return std::unique_ptr<TraceBufferChunk>(
         new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
   }
 
-  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+  void ReturnChunk(size_t index,
+                   std::unique_ptr<TraceBufferChunk> chunk) override {
     DCHECK_GT(in_flight_chunk_count_, 0u);
     DCHECK_LT(index, chunks_.size());
     DCHECK(!chunks_[index]);
@@ -257,11 +214,6 @@
     return NULL;
   }
 
-  scoped_ptr<TraceBuffer> CloneForIteration() const override {
-    NOTIMPLEMENTED();
-    return scoped_ptr<TraceBuffer>();
-  }
-
   void EstimateTraceMemoryOverhead(
       TraceEventMemoryOverhead* overhead) override {
     const size_t chunks_ptr_vector_allocated_size =
@@ -308,14 +260,6 @@
   return &chunk_[*event_index];
 }
 
-scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
-  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
-  cloned_chunk->next_free_ = next_free_;
-  for (size_t i = 0; i < next_free_; ++i)
-    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
-  return cloned_chunk;
-}
-
 void TraceBufferChunk::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
   if (!cached_overhead_estimate_) {
diff --git a/base/trace_event/trace_buffer.h b/base/trace_event/trace_buffer.h
index a7b8059..4885a3c 100644
--- a/base/trace_event/trace_buffer.h
+++ b/base/trace_event/trace_buffer.h
@@ -39,8 +39,6 @@
     return &chunk_[index];
   }
 
-  scoped_ptr<TraceBufferChunk> Clone() const;
-
   void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
 
   // These values must be kept consistent with the numbers of bits of
@@ -51,7 +49,7 @@
 
  private:
   size_t next_free_;
-  scoped_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
+  std::unique_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
   TraceEvent chunk_[kTraceBufferChunkSize];
   uint32_t seq_;
 };
@@ -61,9 +59,9 @@
  public:
   virtual ~TraceBuffer() {}
 
-  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
+  virtual std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
   virtual void ReturnChunk(size_t index,
-                           scoped_ptr<TraceBufferChunk> chunk) = 0;
+                           std::unique_ptr<TraceBufferChunk> chunk) = 0;
 
   virtual bool IsFull() const = 0;
   virtual size_t Size() const = 0;
@@ -73,7 +71,6 @@
   // For iteration. Each TraceBuffer can only be iterated once.
   virtual const TraceBufferChunk* NextChunk() = 0;
 
-  virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
 
   // Computes an estimate of the size of the buffer, including all the retained
   // objects.
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 8e11078..b343ea0 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -10,9 +10,11 @@
 
 #include "base/json/json_reader.h"
 #include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
 #include "base/strings/pattern.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/memory_dump_request_args.h"
@@ -46,15 +48,18 @@
 // String parameters that is used to parse memory dump config in trace config
 // string.
 const char kMemoryDumpConfigParam[] = "memory_dump_config";
+const char kAllowedDumpModesParam[] = "allowed_dump_modes";
 const char kTriggersParam[] = "triggers";
 const char kPeriodicIntervalParam[] = "periodic_interval_ms";
 const char kModeParam[] = "mode";
+const char kHeapProfilerOptions[] = "heap_profiler_options";
+const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
 
 // Default configuration of memory dumps.
-const TraceConfig::MemoryDumpTriggerConfig kDefaultHeavyMemoryDumpTrigger = {
+const TraceConfig::MemoryDumpConfig::Trigger kDefaultHeavyMemoryDumpTrigger = {
     2000,  // periodic_interval_ms
     MemoryDumpLevelOfDetail::DETAILED};
-const TraceConfig::MemoryDumpTriggerConfig kDefaultLightMemoryDumpTrigger = {
+const TraceConfig::MemoryDumpConfig::Trigger kDefaultLightMemoryDumpTrigger = {
     250,  // periodic_interval_ms
     MemoryDumpLevelOfDetail::LIGHT};
 
@@ -63,29 +68,63 @@
  public:
   explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
       : trace_config_(trace_config) {}
+  ~ConvertableTraceConfigToTraceFormat() override {}
+
   void AppendAsTraceFormat(std::string* out) const override {
     out->append(trace_config_.ToString());
   }
 
- protected:
-  ~ConvertableTraceConfigToTraceFormat() override {}
-
  private:
   const TraceConfig trace_config_;
 };
 
+std::set<MemoryDumpLevelOfDetail> GetDefaultAllowedMemoryDumpModes() {
+  std::set<MemoryDumpLevelOfDetail> all_modes;
+  for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::FIRST);
+       mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::LAST); mode++) {
+    all_modes.insert(static_cast<MemoryDumpLevelOfDetail>(mode));
+  }
+  return all_modes;
+}
+
 }  // namespace
 
+TraceConfig::MemoryDumpConfig::HeapProfiler::HeapProfiler()
+    : breakdown_threshold_bytes(kDefaultBreakdownThresholdBytes) {}
+
+void TraceConfig::MemoryDumpConfig::HeapProfiler::Clear() {
+  breakdown_threshold_bytes = kDefaultBreakdownThresholdBytes;
+}
+
+void TraceConfig::ResetMemoryDumpConfig(
+    const TraceConfig::MemoryDumpConfig& memory_dump_config) {
+  memory_dump_config_.Clear();
+  memory_dump_config_ = memory_dump_config;
+}
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig() {}
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
+    const MemoryDumpConfig& other) = default;
+
+TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() {}
+
+void TraceConfig::MemoryDumpConfig::Clear() {
+  allowed_dump_modes.clear();
+  triggers.clear();
+  heap_profiler_options.Clear();
+}
+
 TraceConfig::TraceConfig() {
   InitializeDefault();
 }
 
-TraceConfig::TraceConfig(const std::string& category_filter_string,
-                         const std::string& trace_options_string) {
+TraceConfig::TraceConfig(StringPiece category_filter_string,
+                         StringPiece trace_options_string) {
   InitializeFromStrings(category_filter_string, trace_options_string);
 }
 
-TraceConfig::TraceConfig(const std::string& category_filter_string,
+TraceConfig::TraceConfig(StringPiece category_filter_string,
                          TraceRecordMode record_mode) {
   std::string trace_options_string;
   switch (record_mode) {
@@ -107,7 +146,11 @@
   InitializeFromStrings(category_filter_string, trace_options_string);
 }
 
-TraceConfig::TraceConfig(const std::string& config_string) {
+TraceConfig::TraceConfig(const DictionaryValue& config) {
+  InitializeFromConfigDict(config);
+}
+
+TraceConfig::TraceConfig(StringPiece config_string) {
   if (!config_string.empty())
     InitializeFromConfigString(config_string);
   else
@@ -149,18 +192,15 @@
 }
 
 std::string TraceConfig::ToString() const {
-  base::DictionaryValue dict;
-  ToDict(dict);
-
+  std::unique_ptr<DictionaryValue> dict = ToDict();
   std::string json;
-  base::JSONWriter::Write(dict, &json);
-
+  JSONWriter::Write(*dict, &json);
   return json;
 }
 
-scoped_refptr<ConvertableToTraceFormat>
+std::unique_ptr<ConvertableToTraceFormat>
 TraceConfig::AsConvertableToTraceFormat() const {
-  return new ConvertableTraceConfigToTraceFormat(*this);
+  return WrapUnique(new ConvertableTraceConfigToTraceFormat(*this));
 }
 
 std::string TraceConfig::ToCategoryFilterString() const {
@@ -179,20 +219,18 @@
 
   bool had_enabled_by_default = false;
   DCHECK(category_group_name);
-  CStringTokenizer category_group_tokens(
-      category_group_name, category_group_name + strlen(category_group_name),
-      ",");
+  std::string category_group_name_str = category_group_name;
+  StringTokenizer category_group_tokens(category_group_name_str, ",");
   while (category_group_tokens.GetNext()) {
     std::string category_group_token = category_group_tokens.token();
     // Don't allow empty tokens, nor tokens with leading or trailing space.
     DCHECK(!TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
                category_group_token))
         << "Disallowed category string";
-    if (IsCategoryEnabled(category_group_token.c_str())) {
+    if (IsCategoryEnabled(category_group_token.c_str()))
       return true;
-    }
-    if (!base::MatchPattern(category_group_token.c_str(),
-                            TRACE_DISABLED_BY_DEFAULT("*")))
+
+    if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
       had_enabled_by_default = true;
   }
   // Do a second pass to check for explicitly disabled categories
@@ -201,10 +239,8 @@
   bool category_group_disabled = false;
   while (category_group_tokens.GetNext()) {
     std::string category_group_token = category_group_tokens.token();
-    for (StringList::const_iterator ci = excluded_categories_.begin();
-         ci != excluded_categories_.end();
-         ++ci) {
-      if (base::MatchPattern(category_group_token.c_str(), ci->c_str())) {
+    for (const std::string& category : excluded_categories_) {
+      if (MatchPattern(category_group_token, category)) {
         // Current token of category_group_name is present in excluded_list.
         // Flag the exclusion and proceed further to check if any of the
         // remaining categories of category_group_name is not present in the
@@ -216,8 +252,7 @@
       // excluded_ list. So, if it's not a disabled-by-default category,
       // it has to be included_ list. Enable the category_group_name
       // for recording.
-      if (!base::MatchPattern(category_group_token.c_str(),
-                              TRACE_DISABLED_BY_DEFAULT("*"))) {
+      if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*"))) {
         category_group_disabled = false;
       }
     }
@@ -230,8 +265,8 @@
   // If the category group is not excluded, and there are no included patterns
   // we consider this category group enabled, as long as it had categories
   // other than disabled-by-default.
-  return !category_group_disabled &&
-         included_categories_.empty() && had_enabled_by_default;
+  return !category_group_disabled && had_enabled_by_default &&
+         included_categories_.empty();
 }
 
 void TraceConfig::Merge(const TraceConfig& config) {
@@ -254,9 +289,9 @@
     included_categories_.clear();
   }
 
-  memory_dump_config_.insert(memory_dump_config_.end(),
-                             config.memory_dump_config_.begin(),
-                             config.memory_dump_config_.end());
+  memory_dump_config_.triggers.insert(memory_dump_config_.triggers.end(),
+                             config.memory_dump_config_.triggers.begin(),
+                             config.memory_dump_config_.triggers.end());
 
   disabled_categories_.insert(disabled_categories_.end(),
                               config.disabled_categories_.begin(),
@@ -278,7 +313,7 @@
   disabled_categories_.clear();
   excluded_categories_.clear();
   synthetic_delays_.clear();
-  memory_dump_config_.clear();
+  memory_dump_config_.Clear();
 }
 
 void TraceConfig::InitializeDefault() {
@@ -286,22 +321,12 @@
   enable_sampling_ = false;
   enable_systrace_ = false;
   enable_argument_filter_ = false;
-  excluded_categories_.push_back("*Debug");
-  excluded_categories_.push_back("*Test");
 }
 
-void TraceConfig::InitializeFromConfigString(const std::string& config_string) {
-  scoped_ptr<base::Value> value(base::JSONReader::Read(config_string));
-  if (!value || !value->IsType(base::Value::TYPE_DICTIONARY)) {
-    InitializeDefault();
-    return;
-  }
-  scoped_ptr<base::DictionaryValue> dict(
-        static_cast<base::DictionaryValue*>(value.release()));
-
+void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
   record_mode_ = RECORD_UNTIL_FULL;
   std::string record_mode;
-  if (dict->GetString(kRecordModeParam, &record_mode)) {
+  if (dict.GetString(kRecordModeParam, &record_mode)) {
     if (record_mode == kRecordUntilFull) {
       record_mode_ = RECORD_UNTIL_FULL;
     } else if (record_mode == kRecordContinuously) {
@@ -313,72 +338,64 @@
     }
   }
 
-  bool enable_sampling;
-  if (!dict->GetBoolean(kEnableSamplingParam, &enable_sampling))
-    enable_sampling_ = false;
-  else
-    enable_sampling_ = enable_sampling;
+  bool val;
+  enable_sampling_ = dict.GetBoolean(kEnableSamplingParam, &val) ? val : false;
+  enable_systrace_ = dict.GetBoolean(kEnableSystraceParam, &val) ? val : false;
+  enable_argument_filter_ =
+      dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
 
-  bool enable_systrace;
-  if (!dict->GetBoolean(kEnableSystraceParam, &enable_systrace))
-    enable_systrace_ = false;
-  else
-    enable_systrace_ = enable_systrace;
-
-  bool enable_argument_filter;
-  if (!dict->GetBoolean(kEnableArgumentFilterParam, &enable_argument_filter))
-    enable_argument_filter_ = false;
-  else
-    enable_argument_filter_ = enable_argument_filter;
-
-  base::ListValue* category_list = nullptr;
-  if (dict->GetList(kIncludedCategoriesParam, &category_list))
+  const ListValue* category_list = nullptr;
+  if (dict.GetList(kIncludedCategoriesParam, &category_list))
     SetCategoriesFromIncludedList(*category_list);
-  if (dict->GetList(kExcludedCategoriesParam, &category_list))
+  if (dict.GetList(kExcludedCategoriesParam, &category_list))
     SetCategoriesFromExcludedList(*category_list);
-  if (dict->GetList(kSyntheticDelaysParam, &category_list))
+  if (dict.GetList(kSyntheticDelaysParam, &category_list))
     SetSyntheticDelaysFromList(*category_list);
 
   if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
     // If dump triggers not set, the client is using the legacy with just
     // category enabled. So, use the default periodic dump config.
-    base::DictionaryValue* memory_dump_config = nullptr;
-    if (dict->GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
-      SetMemoryDumpConfig(*memory_dump_config);
+    const DictionaryValue* memory_dump_config = nullptr;
+    if (dict.GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
+      SetMemoryDumpConfigFromConfigDict(*memory_dump_config);
     else
       SetDefaultMemoryDumpConfig();
   }
 }
 
-void TraceConfig::InitializeFromStrings(
-    const std::string& category_filter_string,
-    const std::string& trace_options_string) {
+void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
+  auto dict = DictionaryValue::From(JSONReader::Read(config_string));
+  if (dict)
+    InitializeFromConfigDict(*dict);
+  else
+    InitializeDefault();
+}
+
+void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
+                                        StringPiece trace_options_string) {
   if (!category_filter_string.empty()) {
-    std::vector<std::string> split = base::SplitString(
-        category_filter_string, ",", base::TRIM_WHITESPACE,
-        base::SPLIT_WANT_ALL);
-    std::vector<std::string>::iterator iter;
-    for (iter = split.begin(); iter != split.end(); ++iter) {
-      std::string category = *iter;
+    std::vector<std::string> split = SplitString(
+        category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    for (const std::string& category : split) {
       // Ignore empty categories.
       if (category.empty())
         continue;
       // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
-      if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
-          category.at(category.size() - 1) == ')') {
-        category = category.substr(
+      if (StartsWith(category, kSyntheticDelayCategoryFilterPrefix,
+                     CompareCase::SENSITIVE) &&
+          category.back() == ')') {
+        std::string synthetic_category = category.substr(
             strlen(kSyntheticDelayCategoryFilterPrefix),
             category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
-        size_t name_length = category.find(';');
+        size_t name_length = synthetic_category.find(';');
         if (name_length != std::string::npos && name_length > 0 &&
-            name_length != category.size() - 1) {
-          synthetic_delays_.push_back(category);
+            name_length != synthetic_category.size() - 1) {
+          synthetic_delays_.push_back(synthetic_category);
         }
-      } else if (category.at(0) == '-') {
+      } else if (category.front() == '-') {
         // Excluded categories start with '-'.
         // Remove '-' from category string.
-        category = category.substr(1);
-        excluded_categories_.push_back(category);
+        excluded_categories_.push_back(category.substr(1));
       } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                   TRACE_DISABLED_BY_DEFAULT("")) == 0) {
         disabled_categories_.push_back(category);
@@ -392,24 +409,23 @@
   enable_sampling_ = false;
   enable_systrace_ = false;
   enable_argument_filter_ = false;
-  if(!trace_options_string.empty()) {
-    std::vector<std::string> split = base::SplitString(
-        trace_options_string, ",", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
-    std::vector<std::string>::iterator iter;
-    for (iter = split.begin(); iter != split.end(); ++iter) {
-      if (*iter == kRecordUntilFull) {
+  if (!trace_options_string.empty()) {
+    std::vector<std::string> split =
+        SplitString(trace_options_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    for (const std::string& token : split) {
+      if (token == kRecordUntilFull) {
         record_mode_ = RECORD_UNTIL_FULL;
-      } else if (*iter == kRecordContinuously) {
+      } else if (token == kRecordContinuously) {
         record_mode_ = RECORD_CONTINUOUSLY;
-      } else if (*iter == kTraceToConsole) {
+      } else if (token == kTraceToConsole) {
         record_mode_ = ECHO_TO_CONSOLE;
-      } else if (*iter == kRecordAsMuchAsPossible) {
+      } else if (token == kRecordAsMuchAsPossible) {
         record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
-      } else if (*iter == kEnableSampling) {
+      } else if (token == kEnableSampling) {
         enable_sampling_ = true;
-      } else if (*iter == kEnableSystrace) {
+      } else if (token == kEnableSystrace) {
         enable_systrace_ = true;
-      } else if (*iter == kEnableArgumentFilter) {
+      } else if (token == kEnableArgumentFilter) {
         enable_argument_filter_ = true;
       }
     }
@@ -421,7 +437,7 @@
 }
 
 void TraceConfig::SetCategoriesFromIncludedList(
-    const base::ListValue& included_list) {
+    const ListValue& included_list) {
   included_categories_.clear();
   for (size_t i = 0; i < included_list.GetSize(); ++i) {
     std::string category;
@@ -437,7 +453,7 @@
 }
 
 void TraceConfig::SetCategoriesFromExcludedList(
-    const base::ListValue& excluded_list) {
+    const ListValue& excluded_list) {
   excluded_categories_.clear();
   for (size_t i = 0; i < excluded_list.GetSize(); ++i) {
     std::string category;
@@ -446,7 +462,7 @@
   }
 }
 
-void TraceConfig::SetSyntheticDelaysFromList(const base::ListValue& list) {
+void TraceConfig::SetSyntheticDelaysFromList(const ListValue& list) {
   synthetic_delays_.clear();
   for (size_t i = 0; i < list.GetSize(); ++i) {
     std::string delay;
@@ -461,107 +477,126 @@
   }
 }
 
-void TraceConfig::AddCategoryToDict(base::DictionaryValue& dict,
+void TraceConfig::AddCategoryToDict(DictionaryValue* dict,
                                     const char* param,
                                     const StringList& categories) const {
   if (categories.empty())
     return;
 
-  scoped_ptr<base::ListValue> list(new base::ListValue());
-  for (StringList::const_iterator ci = categories.begin();
-       ci != categories.end();
-       ++ci) {
-    list->AppendString(*ci);
-  }
-
-  dict.Set(param, std::move(list));
+  auto list = MakeUnique<ListValue>();
+  for (const std::string& category : categories)
+    list->AppendString(category);
+  dict->Set(param, std::move(list));
 }
 
-void TraceConfig::SetMemoryDumpConfig(
-    const base::DictionaryValue& memory_dump_config) {
-  memory_dump_config_.clear();
-
-  const base::ListValue* trigger_list = nullptr;
-  if (!memory_dump_config.GetList(kTriggersParam, &trigger_list) ||
-      trigger_list->GetSize() == 0) {
-    return;
+void TraceConfig::SetMemoryDumpConfigFromConfigDict(
+    const DictionaryValue& memory_dump_config) {
+  // Set allowed dump modes.
+  memory_dump_config_.allowed_dump_modes.clear();
+  const ListValue* allowed_modes_list;
+  if (memory_dump_config.GetList(kAllowedDumpModesParam, &allowed_modes_list)) {
+    for (size_t i = 0; i < allowed_modes_list->GetSize(); ++i) {
+      std::string level_of_detail_str;
+      allowed_modes_list->GetString(i, &level_of_detail_str);
+      memory_dump_config_.allowed_dump_modes.insert(
+          StringToMemoryDumpLevelOfDetail(level_of_detail_str));
+    }
+  } else {
+    // If allowed modes param is not given then allow all modes by default.
+    memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
   }
 
-  for (size_t i = 0; i < trigger_list->GetSize(); ++i) {
-    const base::DictionaryValue* trigger = nullptr;
-    if (!trigger_list->GetDictionary(i, &trigger))
-      continue;
+  // Set triggers
+  memory_dump_config_.triggers.clear();
+  const ListValue* trigger_list = nullptr;
+  if (memory_dump_config.GetList(kTriggersParam, &trigger_list) &&
+      trigger_list->GetSize() > 0) {
+    for (size_t i = 0; i < trigger_list->GetSize(); ++i) {
+      const DictionaryValue* trigger = nullptr;
+      if (!trigger_list->GetDictionary(i, &trigger))
+        continue;
 
-    MemoryDumpTriggerConfig dump_config;
-    int interval = 0;
+      int interval = 0;
+      if (!trigger->GetInteger(kPeriodicIntervalParam, &interval))
+        continue;
 
-    if (!trigger->GetInteger(kPeriodicIntervalParam, &interval)) {
-      continue;
+      DCHECK_GT(interval, 0);
+      MemoryDumpConfig::Trigger dump_config;
+      dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
+      std::string level_of_detail_str;
+      trigger->GetString(kModeParam, &level_of_detail_str);
+      dump_config.level_of_detail =
+          StringToMemoryDumpLevelOfDetail(level_of_detail_str);
+      memory_dump_config_.triggers.push_back(dump_config);
     }
-    DCHECK_GT(interval, 0);
-    dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
-    std::string level_of_detail_str;
-    trigger->GetString(kModeParam, &level_of_detail_str);
-    dump_config.level_of_detail =
-        StringToMemoryDumpLevelOfDetail(level_of_detail_str);
-    memory_dump_config_.push_back(dump_config);
+  }
+
+  // Set heap profiler options
+  const DictionaryValue* heap_profiler_options = nullptr;
+  if (memory_dump_config.GetDictionary(kHeapProfilerOptions,
+                                       &heap_profiler_options)) {
+    int min_size_bytes = 0;
+    if (heap_profiler_options->GetInteger(kBreakdownThresholdBytes,
+                                         &min_size_bytes)
+        && min_size_bytes >= 0) {
+      memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
+          static_cast<size_t>(min_size_bytes);
+    } else {
+      memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
+          MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes;
+    }
   }
 }
 
 void TraceConfig::SetDefaultMemoryDumpConfig() {
-  memory_dump_config_.clear();
-  memory_dump_config_.push_back(kDefaultHeavyMemoryDumpTrigger);
-  memory_dump_config_.push_back(kDefaultLightMemoryDumpTrigger);
+  memory_dump_config_.Clear();
+  memory_dump_config_.triggers.push_back(kDefaultHeavyMemoryDumpTrigger);
+  memory_dump_config_.triggers.push_back(kDefaultLightMemoryDumpTrigger);
+  memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
 }
 
-void TraceConfig::ToDict(base::DictionaryValue& dict) const {
+std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
+  auto dict = MakeUnique<DictionaryValue>();
   switch (record_mode_) {
     case RECORD_UNTIL_FULL:
-      dict.SetString(kRecordModeParam, kRecordUntilFull);
+      dict->SetString(kRecordModeParam, kRecordUntilFull);
       break;
     case RECORD_CONTINUOUSLY:
-      dict.SetString(kRecordModeParam, kRecordContinuously);
+      dict->SetString(kRecordModeParam, kRecordContinuously);
       break;
     case RECORD_AS_MUCH_AS_POSSIBLE:
-      dict.SetString(kRecordModeParam, kRecordAsMuchAsPossible);
+      dict->SetString(kRecordModeParam, kRecordAsMuchAsPossible);
       break;
     case ECHO_TO_CONSOLE:
-      dict.SetString(kRecordModeParam, kTraceToConsole);
+      dict->SetString(kRecordModeParam, kTraceToConsole);
       break;
     default:
       NOTREACHED();
   }
 
-  if (enable_sampling_)
-    dict.SetBoolean(kEnableSamplingParam, true);
-  else
-    dict.SetBoolean(kEnableSamplingParam, false);
-
-  if (enable_systrace_)
-    dict.SetBoolean(kEnableSystraceParam, true);
-  else
-    dict.SetBoolean(kEnableSystraceParam, false);
-
-  if (enable_argument_filter_)
-    dict.SetBoolean(kEnableArgumentFilterParam, true);
-  else
-    dict.SetBoolean(kEnableArgumentFilterParam, false);
+  dict->SetBoolean(kEnableSamplingParam, enable_sampling_);
+  dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
+  dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
 
   StringList categories(included_categories_);
   categories.insert(categories.end(),
                     disabled_categories_.begin(),
                     disabled_categories_.end());
-  AddCategoryToDict(dict, kIncludedCategoriesParam, categories);
-  AddCategoryToDict(dict, kExcludedCategoriesParam, excluded_categories_);
-  AddCategoryToDict(dict, kSyntheticDelaysParam, synthetic_delays_);
+  AddCategoryToDict(dict.get(), kIncludedCategoriesParam, categories);
+  AddCategoryToDict(dict.get(), kExcludedCategoriesParam, excluded_categories_);
+  AddCategoryToDict(dict.get(), kSyntheticDelaysParam, synthetic_delays_);
 
   if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
-    scoped_ptr<base::DictionaryValue> memory_dump_config(
-        new base::DictionaryValue());
-    scoped_ptr<base::ListValue> triggers_list(new base::ListValue());
-    for (const MemoryDumpTriggerConfig& config : memory_dump_config_) {
-      scoped_ptr<base::DictionaryValue> trigger_dict(
-          new base::DictionaryValue());
+    auto allowed_modes = MakeUnique<ListValue>();
+    for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
+      allowed_modes->AppendString(MemoryDumpLevelOfDetailToString(dump_mode));
+
+    auto memory_dump_config = MakeUnique<DictionaryValue>();
+    memory_dump_config->Set(kAllowedDumpModesParam, std::move(allowed_modes));
+
+    auto triggers_list = MakeUnique<ListValue>();
+    for (const auto& config : memory_dump_config_.triggers) {
+      auto trigger_dict = MakeUnique<DictionaryValue>();
       trigger_dict->SetInteger(kPeriodicIntervalParam,
                                static_cast<int>(config.periodic_interval_ms));
       trigger_dict->SetString(
@@ -572,8 +607,18 @@
     // Empty triggers will still be specified explicitly since it means that
     // the periodic dumps are not enabled.
     memory_dump_config->Set(kTriggersParam, std::move(triggers_list));
-    dict.Set(kMemoryDumpConfigParam, std::move(memory_dump_config));
+
+    if (memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes !=
+        MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes) {
+      auto options = MakeUnique<DictionaryValue>();
+      options->SetInteger(
+          kBreakdownThresholdBytes,
+          memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+      memory_dump_config->Set(kHeapProfilerOptions, std::move(options));
+    }
+    dict->Set(kMemoryDumpConfigParam, std::move(memory_dump_config));
   }
+  return dict;
 }
 
 std::string TraceConfig::ToTraceOptionsString() const {
@@ -608,11 +653,10 @@
                                             bool included) const {
   bool prepend_comma = !out->empty();
   int token_cnt = 0;
-  for (StringList::const_iterator ci = values.begin();
-       ci != values.end(); ++ci) {
+  for (const std::string& category : values) {
     if (token_cnt > 0 || prepend_comma)
       StringAppendF(out, ",");
-    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
+    StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
     ++token_cnt;
   }
 }
@@ -621,35 +665,28 @@
                                             std::string* out) const {
   bool prepend_comma = !out->empty();
   int token_cnt = 0;
-  for (StringList::const_iterator ci = delays.begin();
-       ci != delays.end(); ++ci) {
+  for (const std::string& category : delays) {
     if (token_cnt > 0 || prepend_comma)
       StringAppendF(out, ",");
     StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
-                  ci->c_str());
+                  category.c_str());
     ++token_cnt;
   }
 }
 
 bool TraceConfig::IsCategoryEnabled(const char* category_name) const {
-  StringList::const_iterator ci;
-
   // Check the disabled- filters and the disabled-* wildcard first so that a
   // "*" filter does not include the disabled.
-  for (ci = disabled_categories_.begin();
-       ci != disabled_categories_.end();
-       ++ci) {
-    if (base::MatchPattern(category_name, ci->c_str()))
+  for (const std::string& category : disabled_categories_) {
+    if (MatchPattern(category_name, category))
       return true;
   }
 
-  if (base::MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+  if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
     return false;
 
-  for (ci = included_categories_.begin();
-       ci != included_categories_.end();
-       ++ci) {
-    if (base::MatchPattern(category_name, ci->c_str()))
+  for (const std::string& category : included_categories_) {
+    if (MatchPattern(category_name, category))
       return true;
   }
 
@@ -657,10 +694,8 @@
 }
 
 bool TraceConfig::IsEmptyOrContainsLeadingOrTrailingWhitespace(
-    const std::string& str) {
-  return  str.empty() ||
-          str.at(0) == ' ' ||
-          str.at(str.length() - 1) == ' ';
+    StringPiece str) {
+  return str.empty() || str.front() == ' ' || str.back() == ' ';
 }
 
 bool TraceConfig::HasIncludedPatterns() const {
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index c7d3f4b..91d6f1f 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -7,11 +7,13 @@
 
 #include <stdint.h>
 
+#include <set>
 #include <string>
 #include <vector>
 
 #include "base/base_export.h"
 #include "base/gtest_prod_util.h"
+#include "base/strings/string_piece.h"
 #include "base/trace_event/memory_dump_request_args.h"
 #include "base/values.h"
 
@@ -38,17 +40,46 @@
 
 class BASE_EXPORT TraceConfig {
  public:
-  typedef std::vector<std::string> StringList;
+  using StringList = std::vector<std::string>;
 
-  // Specifies the memory dump config for tracing. Used only when
-  // "memory-infra" category is enabled.
-  struct MemoryDumpTriggerConfig {
-    uint32_t periodic_interval_ms;
-    MemoryDumpLevelOfDetail level_of_detail;
+  // Specifies the memory dump config for tracing.
+  // Used only when "memory-infra" category is enabled.
+  struct BASE_EXPORT MemoryDumpConfig {
+    MemoryDumpConfig();
+    MemoryDumpConfig(const MemoryDumpConfig& other);
+    ~MemoryDumpConfig();
+
+    // Specifies the triggers in the memory dump config.
+    struct Trigger {
+      uint32_t periodic_interval_ms;
+      MemoryDumpLevelOfDetail level_of_detail;
+    };
+
+    // Specifies the configuration options for the heap profiler.
+    struct HeapProfiler {
+      // Default value for |breakdown_threshold_bytes|.
+      enum { kDefaultBreakdownThresholdBytes = 1024 };
+
+      HeapProfiler();
+
+      // Reset the options to default.
+      void Clear();
+
+      uint32_t breakdown_threshold_bytes;
+    };
+
+    // Reset the values in the config.
+    void Clear();
+
+    // Set of memory dump modes allowed for the tracing session. The explicitly
+    // triggered dumps will be successful only if the dump mode is allowed in
+    // the config.
+    std::set<MemoryDumpLevelOfDetail> allowed_dump_modes;
+
+    std::vector<Trigger> triggers;
+    HeapProfiler heap_profiler_options;
   };
 
-  typedef std::vector<MemoryDumpTriggerConfig> MemoryDumpConfig;
-
   TraceConfig();
 
   // Create TraceConfig object from category filter and trace options strings.
@@ -94,11 +125,10 @@
   // Example: TraceConfig("DELAY(gpu.PresentingFrame;16;alternating)", "");
   //          would make swap buffers take at least 16 ms every other time it
   //          is called; and use default options.
-  TraceConfig(const std::string& category_filter_string,
-              const std::string& trace_options_string);
+  TraceConfig(StringPiece category_filter_string,
+              StringPiece trace_options_string);
 
-  TraceConfig(const std::string& category_filter_string,
-              TraceRecordMode record_mode);
+  TraceConfig(StringPiece category_filter_string, TraceRecordMode record_mode);
 
   // Create TraceConfig object from the trace config string.
   //
@@ -115,7 +145,7 @@
   //                             "inc_pattern*",
   //                             "disabled-by-default-memory-infra"],
   //     "excluded_categories": ["excluded", "exc_pattern*"],
-  //     "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"]
+  //     "synthetic_delays": ["test.Delay1;16", "test.Delay2;32"],
   //     "memory_dump_config": {
   //       "triggers": [
   //         {
@@ -128,7 +158,11 @@
   //
   // Note: memory_dump_config can be specified only if
   // disabled-by-default-memory-infra category is enabled.
-  explicit TraceConfig(const std::string& config_string);
+  explicit TraceConfig(StringPiece config_string);
+
+  // Functionally identical to the above, but takes a parsed dictionary as input
+  // instead of its JSON serialization.
+  explicit TraceConfig(const DictionaryValue& config);
 
   TraceConfig(const TraceConfig& tc);
 
@@ -153,14 +187,15 @@
   // formatted.
   std::string ToString() const;
 
-  // Returns a scoped_refptr and wrap TraceConfig in ConvertableToTraceFormat
-  scoped_refptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
+  // Returns a copy of the TraceConfig wrapped in a ConvertableToTraceFormat
+  std::unique_ptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
 
   // Write the string representation of the CategoryFilter part.
   std::string ToCategoryFilterString() const;
 
   // Returns true if at least one category in the list is enabled by this
-  // trace config.
+  // trace config. This is used to determine if the category filters are
+  // enabled in the TRACE_* macros.
   bool IsCategoryGroupEnabled(const char* category_group) const;
 
   // Merges config with the current TraceConfig
@@ -168,6 +203,9 @@
 
   void Clear();
 
+  // Clears and resets the memory dump config.
+  void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
+
   const MemoryDumpConfig& memory_dump_config() const {
     return memory_dump_config_;
   }
@@ -176,7 +214,6 @@
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
                            TraceConfigFromInvalidLegacyStrings);
-  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, ConstructDefaultTraceConfig);
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidString);
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromInvalidString);
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
@@ -184,31 +221,36 @@
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromMemoryConfigString);
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, LegacyStringToMemoryDumpConfig);
   FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, EmptyMemoryDumpConfigTest);
+  FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
+                           EmptyAndAsteriskCategoryFilterString);
 
   // The default trace config, used when none is provided.
   // Allows all non-disabled-by-default categories through, except if they end
   // in the suffix 'Debug' or 'Test'.
   void InitializeDefault();
 
-  // Initialize from the config string
-  void InitializeFromConfigString(const std::string& config_string);
+  // Initialize from a config dictionary.
+  void InitializeFromConfigDict(const DictionaryValue& dict);
+
+  // Initialize from a config string.
+  void InitializeFromConfigString(StringPiece config_string);
 
   // Initialize from category filter and trace options strings
-  void InitializeFromStrings(const std::string& category_filter_string,
-                             const std::string& trace_options_string);
+  void InitializeFromStrings(StringPiece category_filter_string,
+                             StringPiece trace_options_string);
 
-  void SetCategoriesFromIncludedList(const base::ListValue& included_list);
-  void SetCategoriesFromExcludedList(const base::ListValue& excluded_list);
-  void SetSyntheticDelaysFromList(const base::ListValue& list);
-  void AddCategoryToDict(base::DictionaryValue& dict,
+  void SetCategoriesFromIncludedList(const ListValue& included_list);
+  void SetCategoriesFromExcludedList(const ListValue& excluded_list);
+  void SetSyntheticDelaysFromList(const ListValue& list);
+  void AddCategoryToDict(DictionaryValue* dict,
                          const char* param,
                          const StringList& categories) const;
 
-  void SetMemoryDumpConfig(const base::DictionaryValue& memory_dump_config);
+  void SetMemoryDumpConfigFromConfigDict(
+      const DictionaryValue& memory_dump_config);
   void SetDefaultMemoryDumpConfig();
 
-  // Convert TraceConfig to the dict representation of the TraceConfig.
-  void ToDict(base::DictionaryValue& dict) const;
+  std::unique_ptr<DictionaryValue> ToDict() const;
 
   std::string ToTraceOptionsString() const;
 
@@ -218,11 +260,13 @@
   void WriteCategoryFilterString(const StringList& delays,
                                  std::string* out) const;
 
-  // Returns true if category is enable according to this trace config.
+  // Returns true if the category is enabled according to this trace config.
+  // This tells whether a category is enabled from the TraceConfig's
+  // perspective. Please refer to IsCategoryGroupEnabled() to determine if a
+  // category is enabled from the tracing runtime's perspective.
   bool IsCategoryEnabled(const char* category_name) const;
 
-  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
-      const std::string& str);
+  static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(StringPiece str);
 
   bool HasIncludedPatterns() const;
 
diff --git a/base/trace_event/trace_config_memory_test_util.h b/base/trace_event/trace_config_memory_test_util.h
index 8d8206f..6b47f8d 100644
--- a/base/trace_event/trace_config_memory_test_util.h
+++ b/base/trace_event/trace_config_memory_test_util.h
@@ -24,7 +24,11 @@
             "\"%s\""
           "],"
           "\"memory_dump_config\":{"
-            "\"triggers\":["
+             "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+             "\"heap_profiler_options\":{"
+               "\"breakdown_threshold_bytes\":2048"
+             "},"
+             "\"triggers\":["
               "{"
                 "\"mode\":\"light\","
                 "\"periodic_interval_ms\":%d"
@@ -49,6 +53,7 @@
             "\"%s\""
           "],"
           "\"memory_dump_config\":{"
+            "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
             "\"triggers\":["
             "]"
           "},"
@@ -68,6 +73,28 @@
           "\"record_mode\":\"record-until-full\""
         "}", MemoryDumpManager::kTraceCategory);
   }
+
+  static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
+    return StringPrintf(
+        "{"
+          "\"enable_argument_filter\":false,"
+          "\"enable_sampling\":false,"
+          "\"enable_systrace\":false,"
+          "\"included_categories\":["
+            "\"%s\""
+          "],"
+          "\"memory_dump_config\":{"
+             "\"allowed_dump_modes\":[\"background\"],"
+             "\"triggers\":["
+              "{"
+                "\"mode\":\"background\","
+                "\"periodic_interval_ms\":%d"
+              "}"
+            "]"
+          "},"
+          "\"record_mode\":\"record-until-full\""
+        "}", MemoryDumpManager::kTraceCategory, period_ms);
+  }
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index bd37880..4b46b2f 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -4,6 +4,7 @@
 
 #include <stddef.h>
 
+#include "base/json/json_reader.h"
 #include "base/macros.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/trace_config.h"
@@ -20,9 +21,51 @@
     "\"enable_argument_filter\":false,"
     "\"enable_sampling\":false,"
     "\"enable_systrace\":false,"
-    "\"excluded_categories\":[\"*Debug\",\"*Test\"],"
     "\"record_mode\":\"record-until-full\""
   "}";
+
+const char kCustomTraceConfigString[] =
+  "{"
+    "\"enable_argument_filter\":true,"
+    "\"enable_sampling\":true,"
+    "\"enable_systrace\":true,"
+    "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
+    "\"included_categories\":[\"included\","
+                            "\"inc_pattern*\","
+                            "\"disabled-by-default-cc\","
+                            "\"disabled-by-default-memory-infra\"],"
+    "\"memory_dump_config\":{"
+      "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+      "\"heap_profiler_options\":{"
+        "\"breakdown_threshold_bytes\":10240"
+      "},"
+      "\"triggers\":["
+        "{\"mode\":\"light\",\"periodic_interval_ms\":50},"
+        "{\"mode\":\"detailed\",\"periodic_interval_ms\":1000}"
+      "]"
+    "},"
+    "\"record_mode\":\"record-continuously\","
+    "\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
+  "}";
+
+void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+
+  // Default trace config enables every category filter except the
+  // disabled-by-default-* ones.
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
+
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,not-excluded-category"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,disabled-by-default-cc"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled(
+      "disabled-by-default-cc,disabled-by-default-cc2"));
+}
+
 }  // namespace
 
 TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
@@ -130,9 +173,6 @@
                config.ToTraceOptionsString().c_str());
 
   // From category filter strings
-  config = TraceConfig("-*Debug,-*Test", "");
-  EXPECT_STREQ("-*Debug,-*Test", config.ToCategoryFilterString().c_str());
-
   config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*", "");
   EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
                config.ToCategoryFilterString().c_str());
@@ -232,31 +272,116 @@
 }
 
 TEST(TraceConfigTest, ConstructDefaultTraceConfig) {
-  // Make sure that upon an empty string, we fall back to the default config.
   TraceConfig tc;
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
+
+  // Constructors from category filter string and trace option string.
+  TraceConfig tc_asterisk("*", "");
+  EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+  TraceConfig tc_empty_category_filter("", "");
+  EXPECT_STREQ("", tc_empty_category_filter.ToCategoryFilterString().c_str());
+  EXPECT_STREQ(kDefaultTraceConfigString,
+               tc_empty_category_filter.ToString().c_str());
+  CheckDefaultTraceConfigBehavior(tc_empty_category_filter);
+
+  // Constructor from JSON formated config string.
+  TraceConfig tc_empty_json_string("");
+  EXPECT_STREQ("", tc_empty_json_string.ToCategoryFilterString().c_str());
+  EXPECT_STREQ(kDefaultTraceConfigString,
+               tc_empty_json_string.ToString().c_str());
+  CheckDefaultTraceConfigBehavior(tc_empty_json_string);
+
+  // Constructor from dictionary value.
+  DictionaryValue dict;
+  TraceConfig tc_dict(dict);
+  EXPECT_STREQ("", tc_dict.ToCategoryFilterString().c_str());
+  EXPECT_STREQ(kDefaultTraceConfigString, tc_dict.ToString().c_str());
+  CheckDefaultTraceConfigBehavior(tc_dict);
+}
+
+TEST(TraceConfigTest, EmptyAndAsteriskCategoryFilterString) {
+  TraceConfig tc_empty("", "");
+  TraceConfig tc_asterisk("*", "");
+
+  EXPECT_STREQ("", tc_empty.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("*", tc_asterisk.ToCategoryFilterString().c_str());
+
+  // Both fall back to default config.
+  CheckDefaultTraceConfigBehavior(tc_empty);
+  CheckDefaultTraceConfigBehavior(tc_asterisk);
+
+  // They differ only for internal checking.
+  EXPECT_FALSE(tc_empty.IsCategoryEnabled("Category1"));
+  EXPECT_FALSE(tc_empty.IsCategoryEnabled("not-excluded-category"));
+  EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("Category1"));
+  EXPECT_TRUE(tc_asterisk.IsCategoryEnabled("not-excluded-category"));
+}
+
+TEST(TraceConfigTest, DisabledByDefaultCategoryFilterString) {
+  TraceConfig tc("foo,disabled-by-default-foo", "");
+  EXPECT_STREQ("foo,disabled-by-default-foo",
+               tc.ToCategoryFilterString().c_str());
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+
+  // Enabling only the disabled-by-default-* category means the default ones
+  // are also enabled.
+  tc = TraceConfig("disabled-by-default-foo", "");
+  EXPECT_STREQ("disabled-by-default-foo", tc.ToCategoryFilterString().c_str());
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("disabled-by-default-foo"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(tc.IsCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-bar"));
+}
+
+TEST(TraceConfigTest, TraceConfigFromDict) {
+  // Passing in empty dictionary will result in default trace config.
+  DictionaryValue dict;
+  TraceConfig tc(dict);
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
   EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
-  EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
 
-  EXPECT_FALSE(tc.IsCategoryEnabled("Category1"));
-  EXPECT_FALSE(tc.IsCategoryEnabled("not-excluded-category"));
-  EXPECT_FALSE(tc.IsCategoryEnabled("CategoryTest"));
-  EXPECT_FALSE(tc.IsCategoryEnabled("CategoryDebug"));
-  EXPECT_FALSE(tc.IsCategoryEnabled("disabled-by-default-cc"));
+  std::unique_ptr<Value> default_value(
+      JSONReader::Read(kDefaultTraceConfigString));
+  DCHECK(default_value);
+  const DictionaryValue* default_dict = nullptr;
+  bool is_dict = default_value->GetAsDictionary(&default_dict);
+  DCHECK(is_dict);
+  TraceConfig default_tc(*default_dict);
+  EXPECT_STREQ(kDefaultTraceConfigString, default_tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, default_tc.GetTraceRecordMode());
+  EXPECT_FALSE(default_tc.IsSamplingEnabled());
+  EXPECT_FALSE(default_tc.IsSystraceEnabled());
+  EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
 
-  EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1"));
-  EXPECT_TRUE(tc.IsCategoryGroupEnabled("not-excluded-category"));
-  EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryTest"));
-  EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug"));
-  EXPECT_FALSE(tc.IsCategoryGroupEnabled("disabled-by-default-cc"));
-
-  EXPECT_TRUE(tc.IsCategoryGroupEnabled("Category1,CategoryDebug"));
-  EXPECT_TRUE(tc.IsCategoryGroupEnabled("CategoryDebug,Category1"));
-  EXPECT_TRUE(tc.IsCategoryGroupEnabled("CategoryTest,not-excluded-category"));
-  EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug,CategoryTest"));
+  std::unique_ptr<Value> custom_value(
+      JSONReader::Read(kCustomTraceConfigString));
+  DCHECK(custom_value);
+  const DictionaryValue* custom_dict = nullptr;
+  is_dict = custom_value->GetAsDictionary(&custom_dict);
+  DCHECK(is_dict);
+  TraceConfig custom_tc(*custom_dict);
+  EXPECT_STREQ(kCustomTraceConfigString, custom_tc.ToString().c_str());
+  EXPECT_EQ(RECORD_CONTINUOUSLY, custom_tc.GetTraceRecordMode());
+  EXPECT_TRUE(custom_tc.IsSamplingEnabled());
+  EXPECT_TRUE(custom_tc.IsSystraceEnabled());
+  EXPECT_TRUE(custom_tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("included,inc_pattern*,"
+               "disabled-by-default-cc,disabled-by-default-memory-infra,"
+               "-excluded,-exc_pattern*,"
+               "DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
+               custom_tc.ToCategoryFilterString().c_str());
 }
 
 TEST(TraceConfigTest, TraceConfigFromValidString) {
@@ -336,7 +461,8 @@
   EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
-  EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
 
   tc = TraceConfig("This is an invalid config string.");
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -344,7 +470,8 @@
   EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
-  EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
 
   tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -352,7 +479,8 @@
   EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
-  EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
 
   tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
   EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
@@ -360,7 +488,8 @@
   EXPECT_FALSE(tc.IsSamplingEnabled());
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
-  EXPECT_STREQ("-*Debug,-*Test", tc.ToCategoryFilterString().c_str());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
 
   // If the config string a dictionary formatted as a JSON string, it will
   // initialize TraceConfig with best effort.
@@ -370,6 +499,7 @@
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
 
   tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
   EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
@@ -377,6 +507,7 @@
   EXPECT_FALSE(tc.IsSystraceEnabled());
   EXPECT_FALSE(tc.IsArgumentFilterEnabled());
   EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+  CheckDefaultTraceConfigBehavior(tc);
 
   const char invalid_config_string[] =
     "{"
@@ -418,9 +549,7 @@
                  "\"enable_argument_filter\":false,"
                  "\"enable_sampling\":false,"
                  "\"enable_systrace\":false,"
-                 "\"excluded_categories\":["
-                   "\"*Debug\",\"*Test\",\"excluded\",\"exc_pattern*\""
-                 "],"
+                 "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
                  "\"record_mode\":\"record-until-full\""
                "}",
                tc.ToString().c_str());
@@ -499,20 +628,34 @@
 }
 
 TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
-  std::string tc_str =
+  std::string tc_str1 =
       TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
-  TraceConfig tc(tc_str);
-  EXPECT_EQ(tc_str, tc.ToString());
-  EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
-  EXPECT_EQ(2u, tc.memory_dump_config_.size());
+  TraceConfig tc1(tc_str1);
+  EXPECT_EQ(tc_str1, tc1.ToString());
+  EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+  ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
 
-  EXPECT_EQ(200u, tc.memory_dump_config_[0].periodic_interval_ms);
+  EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
-            tc.memory_dump_config_[0].level_of_detail);
+            tc1.memory_dump_config_.triggers[0].level_of_detail);
 
-  EXPECT_EQ(2000u, tc.memory_dump_config_[1].periodic_interval_ms);
+  EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
-            tc.memory_dump_config_[1].level_of_detail);
+            tc1.memory_dump_config_.triggers[1].level_of_detail);
+  EXPECT_EQ(
+      2048u,
+      tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+
+  std::string tc_str2 =
+      TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
+          1 /* period_ms */);
+  TraceConfig tc2(tc_str2);
+  EXPECT_EQ(tc_str2, tc2.ToString());
+  EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+  ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
+  EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
+  EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
+            tc2.memory_dump_config_.triggers[0].level_of_detail);
 }
 
 TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
@@ -520,14 +663,22 @@
   TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
   EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
             tc.ToString());
-  EXPECT_EQ(0u, tc.memory_dump_config_.size());
+  EXPECT_EQ(0u, tc.memory_dump_config_.triggers.size());
+  EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
+            ::kDefaultBreakdownThresholdBytes,
+            tc.memory_dump_config_.heap_profiler_options
+            .breakdown_threshold_bytes);
 }
 
 TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
   TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
   EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
   EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
-  EXPECT_EQ(2u, tc.memory_dump_config_.size());
+  EXPECT_EQ(2u, tc.memory_dump_config_.triggers.size());
+  EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
+            ::kDefaultBreakdownThresholdBytes,
+            tc.memory_dump_config_.heap_profiler_options
+            .breakdown_threshold_bytes);
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
index 6948d7c..f915780 100644
--- a/base/trace_event/trace_event.gypi
+++ b/base/trace_event/trace_event.gypi
@@ -4,7 +4,10 @@
 {
   'variables': {
     'trace_event_sources' : [
+      'trace_event/blame_context.cc',
+      'trace_event/blame_context.h',
       'trace_event/common/trace_event_common.h',
+      'trace_event/heap_profiler.h',
       'trace_event/heap_profiler_allocation_context.cc',
       'trace_event/heap_profiler_allocation_context.h',
       'trace_event/heap_profiler_allocation_context_tracker.cc',
@@ -32,15 +35,14 @@
       'trace_event/memory_dump_request_args.h',
       'trace_event/memory_dump_session_state.cc',
       'trace_event/memory_dump_session_state.h',
+      'trace_event/memory_infra_background_whitelist.cc',
+      'trace_event/memory_infra_background_whitelist.h',
       'trace_event/process_memory_dump.cc',
       'trace_event/process_memory_dump.h',
       'trace_event/process_memory_maps.cc',
       'trace_event/process_memory_maps.h',
-      'trace_event/process_memory_maps_dump_provider.h',
       'trace_event/process_memory_totals.cc',
       'trace_event/process_memory_totals.h',
-      'trace_event/process_memory_totals_dump_provider.cc',
-      'trace_event/process_memory_totals_dump_provider.h',
       'trace_event/trace_buffer.cc',
       'trace_event/trace_buffer.h',
       'trace_event/trace_config.cc',
@@ -70,6 +72,7 @@
       'trace_event/winheap_dump_provider_win.h',
     ],
     'trace_event_test_sources' : [
+      'trace_event/blame_context_unittest.cc',
       'trace_event/heap_profiler_allocation_context_tracker_unittest.cc',
       'trace_event/heap_profiler_allocation_register_unittest.cc',
       'trace_event/heap_profiler_heap_dump_writer_unittest.cc',
@@ -79,7 +82,6 @@
       'trace_event/memory_allocator_dump_unittest.cc',
       'trace_event/memory_dump_manager_unittest.cc',
       'trace_event/process_memory_dump_unittest.cc',
-      'trace_event/process_memory_totals_dump_provider_unittest.cc',
       'trace_event/trace_config_memory_test_util.h',
       'trace_event/trace_config_unittest.cc',
       'trace_event/trace_event_argument_unittest.cc',
@@ -89,20 +91,12 @@
       'trace_event/winheap_dump_provider_win_unittest.cc',
     ],
     'conditions': [
-      ['OS == "linux" or OS=="android" or OS=="mac"', {
+      ['OS == "linux" or OS=="android" or OS=="mac" or OS=="ios"', {
         'trace_event_sources': [
           'trace_event/malloc_dump_provider.cc',
           'trace_event/malloc_dump_provider.h',
         ],
       }],
-      ['OS == "linux" or OS == "android"', {
-          'trace_event_sources': [
-            'trace_event/process_memory_maps_dump_provider.cc',
-          ],
-          'trace_event_test_sources' : [
-            'trace_event/process_memory_maps_dump_provider_unittest.cc',
-          ],
-      }],
       ['OS == "android"', {
         'trace_event_test_sources' : [
           'trace_event/trace_event_android_unittest.cc',
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
index 75bb81b..a075898 100644
--- a/base/trace_event/trace_event.h
+++ b/base/trace_event/trace_event.h
@@ -7,7 +7,7 @@
 
 // This header file defines implementation details of how the trace macros in
 // trace_event_common.h collect and store trace events. Anything not
-// implementation-specific should go in trace_macros_common.h instead of here.
+// implementation-specific should go in trace_event_common.h instead of here.
 
 #include <stddef.h>
 #include <stdint.h>
@@ -18,6 +18,7 @@
 #include "base/macros.h"
 #include "base/time/time.h"
 #include "base/trace_event/common/trace_event_common.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/trace_event_system_stats_monitor.h"
 #include "base/trace_event/trace_log.h"
 #include "build/build_config.h"
@@ -37,6 +38,11 @@
 #define TRACE_ID_DONT_MANGLE(id) \
     trace_event_internal::TraceID::DontMangle(id)
 
+// By default, trace IDs are eventually converted to a single 64-bit number. Use
+// this macro to add a scope string.
+#define TRACE_ID_WITH_SCOPE(scope, id) \
+    trace_event_internal::TraceID::WithScope(scope, id)
+
 // Sets the current sample state to the given category and name (both must be
 // constant strings). These states are intended for a sampling profiler.
 // Implementation note: we store category and name together because we don't
@@ -99,12 +105,13 @@
 //                    char phase,
 //                    const unsigned char* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    unsigned long long id,
 //                    int num_args,
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    const scoped_refptr<ConvertableToTraceFormat>*
+//                    std::unique_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT \
@@ -116,13 +123,14 @@
 //                    char phase,
 //                    const unsigned char* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    unsigned long long id,
 //                    unsigned long long bind_id,
 //                    int num_args,
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    const scoped_refptr<ConvertableToTraceFormat>*
+//                    std::unique_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID \
@@ -135,13 +143,14 @@
 //                    char phase,
 //                    const unsigned char* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    unsigned long long id,
 //                    int process_id,
 //                    int num_args,
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    const scoped_refptr<ConvertableToTraceFormat>*
+//                    std::unique_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID \
@@ -153,6 +162,7 @@
 //                    char phase,
 //                    const unsigned char* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    unsigned long long id,
 //                    int thread_id,
 //                    const TimeTicks& timestamp,
@@ -160,7 +170,7 @@
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    const scoped_refptr<ConvertableToTraceFormat>*
+//                    std::unique_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP \
@@ -178,9 +188,10 @@
 // Adds a metadata event to the trace log. The |AppendValueAsTraceFormat| method
 // on the convertable value will be called at flush time.
 // TRACE_EVENT_API_ADD_METADATA_EVENT(
-//   const char* event_name,
-//   const char* arg_name,
-//   scoped_refptr<ConvertableToTraceFormat> arg_value)
+//     const unsigned char* category_group_enabled,
+//     const char* event_name,
+//     const char* arg_name,
+//     std::unique_ptr<ConvertableToTraceFormat> arg_value)
 #define TRACE_EVENT_API_ADD_METADATA_EVENT \
     trace_event_internal::AddMetadataEvent
 
@@ -244,8 +255,8 @@
       if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
         trace_event_internal::AddTraceEvent( \
             phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
-            trace_event_internal::kNoId, flags, \
-            trace_event_internal::kNoId, ##__VA_ARGS__); \
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+            flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
       } \
     } while (0)
 
@@ -260,8 +271,9 @@
           trace_event_internal::AddTraceEvent( \
               TRACE_EVENT_PHASE_COMPLETE, \
               INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
-              trace_event_internal::kNoId, TRACE_EVENT_FLAG_NONE, \
-              trace_event_internal::kNoId, ##__VA_ARGS__); \
+              trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+              TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
+              ##__VA_ARGS__); \
       INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
           INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
     }
@@ -278,8 +290,8 @@
         trace_event_internal::AddTraceEvent( \
             TRACE_EVENT_PHASE_COMPLETE, \
             INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
-            trace_event_internal::kNoId, trace_event_flags, \
-            trace_event_bind_id.data(), ##__VA_ARGS__); \
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+            trace_event_flags, trace_event_bind_id.raw_id(), ##__VA_ARGS__); \
     INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
         INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
   }
@@ -296,8 +308,8 @@
             id, &trace_event_flags); \
         trace_event_internal::AddTraceEvent( \
             phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
-            name, trace_event_trace_id.data(), trace_event_flags, \
-            trace_event_internal::kNoId, ##__VA_ARGS__); \
+            name, trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+            trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
       } \
     } while (0)
 
@@ -310,7 +322,8 @@
     if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) {  \
       trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(           \
           phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,     \
-          trace_event_internal::kNoId, TRACE_EVENT_API_CURRENT_THREAD_ID,    \
+          trace_event_internal::kGlobalScope, trace_event_internal::kNoId,   \
+          TRACE_EVENT_API_CURRENT_THREAD_ID,                                 \
           base::TimeTicks::FromInternalValue(timestamp),                     \
           flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,                       \
           trace_event_internal::kNoId, ##__VA_ARGS__);                       \
@@ -329,18 +342,62 @@
                                                          &trace_event_flags); \
       trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(            \
           phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,      \
-          trace_event_trace_id.data(), thread_id,                             \
-          base::TimeTicks::FromInternalValue(timestamp),                      \
+          trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),        \
+          thread_id, base::TimeTicks::FromInternalValue(timestamp),           \
           trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,            \
           trace_event_internal::kNoId, ##__VA_ARGS__);                        \
     }                                                                         \
   } while (0)
 
+// Implementation detail: internal macro to create static category and add
+// metadata event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...)        \
+  do {                                                                      \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                 \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+      TRACE_EVENT_API_ADD_METADATA_EVENT(                                   \
+          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,           \
+          ##__VA_ARGS__);                                                   \
+    }                                                                       \
+  } while (0)
+
+// Implementation detail: internal macro to enter and leave a
+// context based on the current scope.
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+  struct INTERNAL_TRACE_EVENT_UID(ScopedContext) {                         \
+   public:                                                                 \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) {    \
+      TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_);               \
+    }                                                                      \
+    ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() {                           \
+      TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_);               \
+    }                                                                      \
+                                                                           \
+   private:                                                                \
+    uint64_t cid_;                                                         \
+    /* Local class friendly DISALLOW_COPY_AND_ASSIGN */                    \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)                                \
+    (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {};                   \
+    void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {};     \
+  };                                                                       \
+  INTERNAL_TRACE_EVENT_UID(ScopedContext)                                  \
+  INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+
+// Implementation detail: internal macro to trace a task execution with the
+// location where it was posted from.
+#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task)                 \
+  TRACE_EVENT2("toplevel", run_function, "src_file",                      \
+               (task).posted_from.file_name(), "src_func",                \
+               (task).posted_from.function_name());                       \
+  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID( \
+      task_event)((task).posted_from.file_name());
+
 namespace trace_event_internal {
 
 // Specify these values when the corresponding argument of AddTraceEvent is not
 // used.
 const int kZeroNumArgs = 0;
+const std::nullptr_t kGlobalScope = nullptr;
 const unsigned long long kNoId = 0;
 
 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
@@ -348,87 +405,111 @@
 // collide when the same pointer is used on different processes.
 class TraceID {
  public:
+  class WithScope {
+   public:
+    WithScope(const char* scope, unsigned long long raw_id)
+        : scope_(scope), raw_id_(raw_id) {}
+    unsigned long long raw_id() const { return raw_id_; }
+    const char* scope() const { return scope_; }
+   private:
+    const char* scope_ = nullptr;
+    unsigned long long raw_id_;
+  };
+
   class DontMangle {
    public:
-    explicit DontMangle(const void* id)
-        : data_(static_cast<unsigned long long>(
-              reinterpret_cast<uintptr_t>(id))) {}
-    explicit DontMangle(unsigned long long id) : data_(id) {}
-    explicit DontMangle(unsigned long id) : data_(id) {}
-    explicit DontMangle(unsigned int id) : data_(id) {}
-    explicit DontMangle(unsigned short id) : data_(id) {}
-    explicit DontMangle(unsigned char id) : data_(id) {}
-    explicit DontMangle(long long id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit DontMangle(long id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit DontMangle(int id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit DontMangle(short id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit DontMangle(signed char id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    unsigned long long data() const { return data_; }
+    explicit DontMangle(const void* raw_id)
+        : raw_id_(static_cast<unsigned long long>(
+              reinterpret_cast<uintptr_t>(raw_id))) {}
+    explicit DontMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned long raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned short raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(long long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(int raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(short raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(signed char raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(WithScope scoped_id)
+        : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+    const char* scope() const { return scope_; }
+    unsigned long long raw_id() const { return raw_id_; }
    private:
-    unsigned long long data_;
+    const char* scope_ = nullptr;
+    unsigned long long raw_id_;
   };
 
   class ForceMangle {
    public:
-    explicit ForceMangle(unsigned long long id) : data_(id) {}
-    explicit ForceMangle(unsigned long id) : data_(id) {}
-    explicit ForceMangle(unsigned int id) : data_(id) {}
-    explicit ForceMangle(unsigned short id) : data_(id) {}
-    explicit ForceMangle(unsigned char id) : data_(id) {}
-    explicit ForceMangle(long long id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit ForceMangle(long id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit ForceMangle(int id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit ForceMangle(short id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit ForceMangle(signed char id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    unsigned long long data() const { return data_; }
+    explicit ForceMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned long raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned short raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(long long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(int raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(short raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(signed char raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    unsigned long long raw_id() const { return raw_id_; }
    private:
-    unsigned long long data_;
+    unsigned long long raw_id_;
   };
-  TraceID(const void* id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(
-              reinterpret_cast<uintptr_t>(id))) {
+  TraceID(const void* raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(
+                reinterpret_cast<uintptr_t>(raw_id))) {
     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) {
+  TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(DontMangle id, unsigned int* /* flags */) : data_(id.data()) {
+  TraceID(DontMangle maybe_scoped_id, unsigned int* /*flags*/)
+      : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
+  TraceID(unsigned long long raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
   }
-  TraceID(unsigned long long id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(unsigned long id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(unsigned int id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(unsigned short id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(unsigned char id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(long long id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
-  TraceID(long id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
-  TraceID(int id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
-  TraceID(short id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
-  TraceID(signed char id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+  TraceID(unsigned long raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned short raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(long long raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(long raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(int raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(short raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(signed char raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(WithScope scoped_id, unsigned int* /*flags*/)
+      : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
 
-  unsigned long long data() const { return data_; }
+  unsigned long long raw_id() const { return raw_id_; }
+  const char* scope() const { return scope_; }
 
  private:
-  unsigned long long data_;
+  const char* scope_ = nullptr;
+  unsigned long long raw_id_;
 };
 
 // Simple union to store various types as unsigned long long.
@@ -539,32 +620,37 @@
 // pointers to the internal c_str and pass through to the tracing API,
 // the arg_values must live throughout these procedures.
 
+template <class ARG1_CONVERTABLE_TYPE>
 static inline base::trace_event::TraceEventHandle
 AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>&
-        arg1_val) {
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
   const int num_args = 1;
   unsigned char arg_types[1] = { TRACE_VALUE_TYPE_CONVERTABLE };
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[1] = {std::move(arg1_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, &arg1_name, arg_types, NULL, &arg1_val, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, &arg1_name, arg_types, NULL, convertable_values,
+      flags);
 }
 
-template<class ARG1_TYPE>
+template <class ARG1_TYPE, class ARG2_CONVERTABLE_TYPE>
 static inline base::trace_event::TraceEventHandle
 AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
@@ -573,8 +659,7 @@
     const char* arg1_name,
     const ARG1_TYPE& arg1_val,
     const char* arg2_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>&
-        arg2_val) {
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
   const int num_args = 2;
   const char* arg_names[2] = { arg1_name, arg2_name };
 
@@ -582,29 +667,28 @@
   unsigned long long arg_values[2];
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
   arg_types[1] = TRACE_VALUE_TYPE_CONVERTABLE;
-
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-      convertable_values[2];
-  convertable_values[1] = arg2_val;
-
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {nullptr, std::move(arg2_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, convertable_values,
+      flags);
 }
 
-template<class ARG2_TYPE>
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_TYPE>
 static inline base::trace_event::TraceEventHandle
 AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>& arg1_val,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
     const char* arg2_name,
     const ARG2_TYPE& arg2_val) {
   const int num_args = 2;
@@ -615,41 +699,40 @@
   arg_types[0] = TRACE_VALUE_TYPE_CONVERTABLE;
   arg_values[0] = 0;
   SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
-
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-      convertable_values[2];
-  convertable_values[0] = arg1_val;
-
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {std::move(arg1_val), nullptr};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, convertable_values,
+      flags);
 }
 
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_CONVERTABLE_TYPE>
 static inline base::trace_event::TraceEventHandle
 AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>& arg1_val,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
     const char* arg2_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>&
-        arg2_val) {
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
   const int num_args = 2;
   const char* arg_names[2] = { arg1_name, arg2_name };
   unsigned char arg_types[2] =
       { TRACE_VALUE_TYPE_CONVERTABLE, TRACE_VALUE_TYPE_CONVERTABLE };
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-      convertable_values[2] = {arg1_val, arg2_val};
-
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {std::move(arg1_val), std::move(arg2_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, arg_names, arg_types, NULL, convertable_values, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, NULL, convertable_values,
+      flags);
 }
 
 static inline base::trace_event::TraceEventHandle
@@ -657,27 +740,30 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
     unsigned int flags,
     unsigned long long bind_id) {
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
 }
 
 static inline base::trace_event::TraceEventHandle AddTraceEvent(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned int flags,
     unsigned long long bind_id) {
   const int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   const base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
-      phase, category_group_enabled, name, id, thread_id, now, flags, bind_id);
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id);
 }
 
 template<class ARG1_TYPE>
@@ -686,6 +772,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
@@ -698,8 +785,8 @@
   unsigned long long arg_values[1];
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, &arg1_name, arg_types, arg_values, NULL, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, &arg1_name, arg_types, arg_values, NULL, flags);
 }
 
 template<class ARG1_TYPE>
@@ -707,6 +794,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned int flags,
     unsigned long long bind_id,
@@ -714,9 +802,27 @@
     const ARG1_TYPE& arg1_val) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
-  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
-                                               name, id, thread_id, now, flags,
-                                               bind_id, arg1_name, arg1_val);
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val);
+}
+
+template <class ARG1_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val));
 }
 
 template<class ARG1_TYPE, class ARG2_TYPE>
@@ -725,6 +831,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
@@ -741,8 +848,68 @@
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
   SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, arg_names, arg_types, arg_values, NULL, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, NULL, flags);
+}
+
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val), arg2_name, arg2_val);
+}
+
+template <class ARG1_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val, arg2_name, std::move(arg2_val));
+}
+
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val), arg2_name, std::move(arg2_val));
 }
 
 template<class ARG1_TYPE, class ARG2_TYPE>
@@ -750,6 +917,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned int flags,
     unsigned long long bind_id,
@@ -760,20 +928,22 @@
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
-      phase, category_group_enabled, name, id, thread_id, now, flags, bind_id,
-      arg1_name, arg1_val, arg2_name, arg2_val);
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val, arg2_name, arg2_val);
 }
 
+template <class ARG1_CONVERTABLE_TYPE>
 static inline void AddMetadataEvent(
+    const unsigned char* category_group_enabled,
     const char* event_name,
     const char* arg_name,
-    scoped_refptr<base::trace_event::ConvertableToTraceFormat> arg_value) {
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg_value) {
   const char* arg_names[1] = {arg_name};
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-      convertable_values[1] = {arg_value};
   unsigned char arg_types[1] = {TRACE_VALUE_TYPE_CONVERTABLE};
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[1] = {std::move(arg_value)};
   base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
-      event_name,
+      category_group_enabled, event_name,
       1,  // num_args
       arg_names, arg_types,
       nullptr,  // arg_values
@@ -781,7 +951,8 @@
 }
 
 template <class ARG1_TYPE>
-static void AddMetadataEvent(const char* event_name,
+static void AddMetadataEvent(const unsigned char* category_group_enabled,
+                             const char* event_name,
                              const char* arg_name,
                              const ARG1_TYPE& arg_val) {
   const int num_args = 1;
@@ -791,8 +962,8 @@
   SetTraceValue(arg_val, &arg_types[0], &arg_values[0]);
 
   base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
-      event_name, num_args, arg_names, arg_types, arg_values, nullptr,
-      TRACE_EVENT_FLAG_NONE);
+      category_group_enabled, event_name, num_args, arg_names, arg_types,
+      arg_values, nullptr, TRACE_EVENT_FLAG_NONE);
 }
 
 // Used by TRACE_EVENTx macros. Do not use directly.
diff --git a/base/trace_event/trace_event_argument.cc b/base/trace_event/trace_event_argument.cc
index 6d787c8..336d964 100644
--- a/base/trace_event/trace_event_argument.cc
+++ b/base/trace_event/trace_event_argument.cc
@@ -10,6 +10,7 @@
 
 #include "base/bits.h"
 #include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
 #include "base/trace_event/trace_event_memory_overhead.h"
 #include "base/values.h"
 
@@ -234,7 +235,8 @@
   pickle_.WriteBytes(&kTypeEndArray, 1);
 }
 
-void TracedValue::SetValue(const char* name, scoped_ptr<base::Value> value) {
+void TracedValue::SetValue(const char* name,
+                           std::unique_ptr<base::Value> value) {
   SetBaseValueWithCopiedName(name, *value);
 }
 
@@ -286,7 +288,7 @@
       const ListValue* list_value;
       value.GetAsList(&list_value);
       BeginArrayWithCopiedName(name);
-      for (base::Value* base_value : *list_value)
+      for (const auto& base_value : *list_value)
         AppendBaseValue(*base_value);
       EndArray();
     } break;
@@ -340,15 +342,15 @@
       const ListValue* list_value;
       value.GetAsList(&list_value);
       BeginArray();
-      for (base::Value* base_value : *list_value)
+      for (const auto& base_value : *list_value)
         AppendBaseValue(*base_value);
       EndArray();
     } break;
   }
 }
 
-scoped_ptr<base::Value> TracedValue::ToBaseValue() const {
-  scoped_ptr<DictionaryValue> root(new DictionaryValue);
+std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
+  std::unique_ptr<DictionaryValue> root(new DictionaryValue);
   DictionaryValue* cur_dict = root.get();
   ListValue* cur_list = nullptr;
   std::vector<Value*> stack;
@@ -359,14 +361,14 @@
     DCHECK((cur_dict && !cur_list) || (cur_list && !cur_dict));
     switch (*type) {
       case kTypeStartDict: {
-        auto new_dict = new DictionaryValue();
+        auto* new_dict = new DictionaryValue();
         if (cur_dict) {
           cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
-                                            make_scoped_ptr(new_dict));
+                                            WrapUnique(new_dict));
           stack.push_back(cur_dict);
           cur_dict = new_dict;
         } else {
-          cur_list->Append(make_scoped_ptr(new_dict));
+          cur_list->Append(WrapUnique(new_dict));
           stack.push_back(cur_list);
           cur_list = nullptr;
           cur_dict = new_dict;
@@ -384,15 +386,15 @@
       } break;
 
       case kTypeStartArray: {
-        auto new_list = new ListValue();
+        auto* new_list = new ListValue();
         if (cur_dict) {
           cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
-                                            make_scoped_ptr(new_list));
+                                            WrapUnique(new_list));
           stack.push_back(cur_dict);
           cur_dict = nullptr;
           cur_list = new_list;
         } else {
-          cur_list->Append(make_scoped_ptr(new_list));
+          cur_list->Append(WrapUnique(new_list));
           stack.push_back(cur_list);
           cur_list = new_list;
         }
@@ -460,14 +462,11 @@
 
 void TracedValue::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
-  const size_t kPickleHeapAlign = 4096;  // Must be == Pickle::kPickleHeapAlign.
   overhead->Add("TracedValue",
-
                 /* allocated size */
-                bits::Align(pickle_.GetTotalAllocatedSize(), kPickleHeapAlign),
-
+                pickle_.GetTotalAllocatedSize(),
                 /* resident size */
-                bits::Align(pickle_.size(), kPickleHeapAlign));
+                pickle_.size());
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/trace_event_argument.h b/base/trace_event/trace_event_argument.h
index a127b0d..81d8c01 100644
--- a/base/trace_event/trace_event_argument.h
+++ b/base/trace_event/trace_event_argument.h
@@ -7,11 +7,11 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/pickle.h"
 #include "base/strings/string_piece.h"
 #include "base/trace_event/trace_event_impl.h"
@@ -26,6 +26,7 @@
  public:
   TracedValue();
   explicit TracedValue(size_t capacity);
+  ~TracedValue() override;
 
   void EndDictionary();
   void EndArray();
@@ -66,17 +67,15 @@
   // a copy-and-translation of the base::Value into the equivalent TracedValue.
   // TODO(primiano): migrate the (three) existing clients to the cheaper
   // SetValue(TracedValue) API. crbug.com/495628.
-  void SetValue(const char* name, scoped_ptr<base::Value> value);
+  void SetValue(const char* name, std::unique_ptr<base::Value> value);
   void SetBaseValueWithCopiedName(base::StringPiece name,
                                   const base::Value& value);
   void AppendBaseValue(const base::Value& value);
 
   // Public for tests only.
-  scoped_ptr<base::Value> ToBaseValue() const;
+  std::unique_ptr<base::Value> ToBaseValue() const;
 
  private:
-  ~TracedValue() override;
-
   Pickle pickle_;
 
 #ifndef NDEBUG
diff --git a/base/trace_event/trace_event_argument_unittest.cc b/base/trace_event/trace_event_argument_unittest.cc
index 82436ba..61395f4 100644
--- a/base/trace_event/trace_event_argument_unittest.cc
+++ b/base/trace_event/trace_event_argument_unittest.cc
@@ -8,6 +8,7 @@
 
 #include <utility>
 
+#include "base/memory/ptr_util.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -15,7 +16,7 @@
 namespace trace_event {
 
 TEST(TraceEventArgumentTest, FlatDictionary) {
-  scoped_refptr<TracedValue> value = new TracedValue();
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("int", 2014);
   value->SetDouble("double", 0.0);
   value->SetBoolean("bool", true);
@@ -28,7 +29,7 @@
 }
 
 TEST(TraceEventArgumentTest, NoDotPathExpansion) {
-  scoped_refptr<TracedValue> value = new TracedValue();
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("in.t", 2014);
   value->SetDouble("doub.le", 0.0);
   value->SetBoolean("bo.ol", true);
@@ -41,7 +42,7 @@
 }
 
 TEST(TraceEventArgumentTest, Hierarchy) {
-  scoped_refptr<TracedValue> value = new TracedValue();
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("i0", 2014);
   value->BeginDictionary("dict1");
   value->SetInteger("i1", 2014);
@@ -77,7 +78,7 @@
     kLongString3[i] = 'a' + (i % 25);
   kLongString3[sizeof(kLongString3) - 1] = '\0';
 
-  scoped_refptr<TracedValue> value = new TracedValue();
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->SetString("a", "short");
   value->SetString("b", kLongString);
   value->BeginArray("c");
@@ -100,20 +101,20 @@
   FundamentalValue bool_value(true);
   FundamentalValue double_value(42.0f);
 
-  auto dict_value = make_scoped_ptr(new DictionaryValue);
+  auto dict_value = WrapUnique(new DictionaryValue);
   dict_value->SetBoolean("bool", true);
   dict_value->SetInteger("int", 42);
   dict_value->SetDouble("double", 42.0f);
   dict_value->SetString("string", std::string("a") + "b");
   dict_value->SetString("string", std::string("a") + "b");
 
-  auto list_value = make_scoped_ptr(new ListValue);
+  auto list_value = WrapUnique(new ListValue);
   list_value->AppendBoolean(false);
   list_value->AppendInteger(1);
   list_value->AppendString("in_list");
   list_value->Append(std::move(dict_value));
 
-  scoped_refptr<TracedValue> value = new TracedValue();
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->BeginDictionary("outer_dict");
   value->SetValue("inner_list", std::move(list_value));
   value->EndDictionary();
@@ -130,10 +131,10 @@
 }
 
 TEST(TraceEventArgumentTest, PassTracedValue) {
-  auto dict_value = make_scoped_refptr(new TracedValue);
+  auto dict_value = WrapUnique(new TracedValue());
   dict_value->SetInteger("a", 1);
 
-  auto nested_dict_value = make_scoped_refptr(new TracedValue);
+  auto nested_dict_value = WrapUnique(new TracedValue());
   nested_dict_value->SetInteger("b", 2);
   nested_dict_value->BeginArray("c");
   nested_dict_value->AppendString("foo");
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index 24d6568..f469f2f 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -41,6 +41,7 @@
 
 TraceEvent::TraceEvent()
     : duration_(TimeDelta::FromInternalValue(-1)),
+      scope_(trace_event_internal::kGlobalScope),
       id_(0u),
       category_group_enabled_(NULL),
       name_(NULL),
@@ -55,26 +56,27 @@
 TraceEvent::~TraceEvent() {
 }
 
-void TraceEvent::CopyFrom(const TraceEvent& other) {
-  timestamp_ = other.timestamp_;
-  thread_timestamp_ = other.thread_timestamp_;
-  duration_ = other.duration_;
-  id_ = other.id_;
-  category_group_enabled_ = other.category_group_enabled_;
-  name_ = other.name_;
-  if (other.flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID)
-    process_id_ = other.process_id_;
+void TraceEvent::MoveFrom(std::unique_ptr<TraceEvent> other) {
+  timestamp_ = other->timestamp_;
+  thread_timestamp_ = other->thread_timestamp_;
+  duration_ = other->duration_;
+  scope_ = other->scope_;
+  id_ = other->id_;
+  category_group_enabled_ = other->category_group_enabled_;
+  name_ = other->name_;
+  if (other->flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID)
+    process_id_ = other->process_id_;
   else
-    thread_id_ = other.thread_id_;
-  phase_ = other.phase_;
-  flags_ = other.flags_;
-  parameter_copy_storage_ = other.parameter_copy_storage_;
+    thread_id_ = other->thread_id_;
+  phase_ = other->phase_;
+  flags_ = other->flags_;
+  parameter_copy_storage_ = std::move(other->parameter_copy_storage_);
 
   for (int i = 0; i < kTraceMaxNumArgs; ++i) {
-    arg_names_[i] = other.arg_names_[i];
-    arg_types_[i] = other.arg_types_[i];
-    arg_values_[i] = other.arg_values_[i];
-    convertable_values_[i] = other.convertable_values_[i];
+    arg_names_[i] = other->arg_names_[i];
+    arg_types_[i] = other->arg_types_[i];
+    arg_values_[i] = other->arg_values_[i];
+    convertable_values_[i] = std::move(other->convertable_values_[i]);
   }
 }
 
@@ -85,17 +87,19 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned long long bind_id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   timestamp_ = timestamp;
   thread_timestamp_ = thread_timestamp;
   duration_ = TimeDelta::FromInternalValue(-1);
+  scope_ = scope;
   id_ = id;
   category_group_enabled_ = category_group_enabled;
   name_ = name;
@@ -111,22 +115,24 @@
     arg_names_[i] = arg_names[i];
     arg_types_[i] = arg_types[i];
 
-    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
-      convertable_values_[i] = convertable_values[i];
-    else
+    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
+      convertable_values_[i] = std::move(convertable_values[i]);
+    } else {
       arg_values_[i].as_uint = arg_values[i];
+      convertable_values_[i].reset();
+    }
   }
   for (; i < kTraceMaxNumArgs; ++i) {
     arg_names_[i] = NULL;
     arg_values_[i].as_uint = 0u;
-    convertable_values_[i] = NULL;
+    convertable_values_[i].reset();
     arg_types_[i] = TRACE_VALUE_TYPE_UINT;
   }
 
   bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
   size_t alloc_size = 0;
   if (copy) {
-    alloc_size += GetAllocLength(name);
+    alloc_size += GetAllocLength(name) + GetAllocLength(scope);
     for (i = 0; i < num_args; ++i) {
       alloc_size += GetAllocLength(arg_names_[i]);
       if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
@@ -147,12 +153,13 @@
   }
 
   if (alloc_size) {
-    parameter_copy_storage_ = new RefCountedString;
-    parameter_copy_storage_->data().resize(alloc_size);
-    char* ptr = string_as_array(&parameter_copy_storage_->data());
+    parameter_copy_storage_.reset(new std::string);
+    parameter_copy_storage_->resize(alloc_size);
+    char* ptr = string_as_array(parameter_copy_storage_.get());
     const char* end = ptr + alloc_size;
     if (copy) {
       CopyTraceEventParameter(&ptr, &name_, end);
+      CopyTraceEventParameter(&ptr, &scope_, end);
       for (i = 0; i < num_args; ++i) {
         CopyTraceEventParameter(&ptr, &arg_names_[i], end);
       }
@@ -171,9 +178,9 @@
   // Only reset fields that won't be initialized in Initialize(), or that may
   // hold references to other objects.
   duration_ = TimeDelta::FromInternalValue(-1);
-  parameter_copy_storage_ = NULL;
+  parameter_copy_storage_.reset();
   for (int i = 0; i < kTraceMaxNumArgs; ++i)
-    convertable_values_[i] = NULL;
+    convertable_values_[i].reset();
 }
 
 void TraceEvent::UpdateDuration(const TimeTicks& now,
@@ -191,11 +198,8 @@
     TraceEventMemoryOverhead* overhead) {
   overhead->Add("TraceEvent", sizeof(*this));
 
-  // TODO(primiano): parameter_copy_storage_ is refcounted and, in theory,
-  // could be shared by several events and we might overcount. In practice
-  // this is unlikely but it's worth checking.
   if (parameter_copy_storage_)
-    overhead->AddRefCountedString(*parameter_copy_storage_.get());
+    overhead->AddString(*parameter_copy_storage_);
 
   for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
     if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
@@ -257,7 +261,7 @@
       // So as not to lose bits from a 64-bit pointer, output as a hex string.
       StringAppendF(
           out, "\"0x%" PRIx64 "\"",
-          static_cast<uint64_t>(reinterpret_cast<intptr_t>(value.as_pointer)));
+          static_cast<uint64_t>(reinterpret_cast<uintptr_t>(value.as_pointer)));
       break;
     case TRACE_VALUE_TYPE_STRING:
     case TRACE_VALUE_TYPE_COPY_STRING:
@@ -289,10 +293,10 @@
   // Category group checked at category creation time.
   DCHECK(!strchr(name_, '"'));
   StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
-                     ","
-                     "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":",
-                process_id, thread_id, time_int64, phase_, category_group_name,
-                name_);
+                     ",\"ph\":\"%c\",\"cat\":\"%s\",\"name\":",
+                process_id, thread_id, time_int64, phase_, category_group_name);
+  EscapeJSONString(name_, true, out);
+  *out += ",\"args\":";
 
   // Output argument names and values, stop at first NULL argument name.
   // TODO(oysteine): The dual predicates here is a bit ugly; if the filtering
@@ -354,8 +358,11 @@
 
   // If id_ is set, print it out as a hex string so we don't loose any
   // bits (it might be a 64-bit pointer).
-  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
+  if (flags_ & TRACE_EVENT_FLAG_HAS_ID) {
+    if (scope_ != trace_event_internal::kGlobalScope)
+      StringAppendF(out, ",\"scope\":\"%s\"", scope_);
     StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64_t>(id_));
+  }
 
   if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
     StringAppendF(out, ",\"bp\":\"e\"");
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
index 36461e2..4382217 100644
--- a/base/trace_event/trace_event_impl.h
+++ b/base/trace_event/trace_event_impl.h
@@ -8,6 +8,7 @@
 
 #include <stdint.h>
 
+#include <memory>
 #include <stack>
 #include <string>
 #include <vector>
@@ -17,7 +18,6 @@
 #include "base/callback.h"
 #include "base/containers/hash_tables.h"
 #include "base/macros.h"
-#include "base/memory/ref_counted_memory.h"
 #include "base/observer_list.h"
 #include "base/single_thread_task_runner.h"
 #include "base/strings/string_util.h"
@@ -44,9 +44,11 @@
 
 // For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
 // class must implement this interface.
-class BASE_EXPORT ConvertableToTraceFormat
-    : public RefCounted<ConvertableToTraceFormat> {
+class BASE_EXPORT ConvertableToTraceFormat {
  public:
+  ConvertableToTraceFormat() {}
+  virtual ~ConvertableToTraceFormat() {}
+
   // Append the class info to the provided |out| string. The appended
   // data must be a valid JSON object. Strings must be properly quoted, and
   // escaped. There is no processing applied to the content after it is
@@ -61,11 +63,8 @@
     return result;
   }
 
- protected:
-  virtual ~ConvertableToTraceFormat() {}
-
  private:
-  friend class RefCounted<ConvertableToTraceFormat>;
+  DISALLOW_COPY_AND_ASSIGN(ConvertableToTraceFormat);
 };
 
 const int kTraceMaxNumArgs = 2;
@@ -93,25 +92,23 @@
   TraceEvent();
   ~TraceEvent();
 
-  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
-  // Use explicit copy method to avoid accidentally misuse of copy.
-  void CopyFrom(const TraceEvent& other);
+  void MoveFrom(std::unique_ptr<TraceEvent> other);
 
-  void Initialize(
-      int thread_id,
-      TimeTicks timestamp,
-      ThreadTicks thread_timestamp,
-      char phase,
-      const unsigned char* category_group_enabled,
-      const char* name,
-      unsigned long long id,
-      unsigned long long bind_id,
-      int num_args,
-      const char** arg_names,
-      const unsigned char* arg_types,
-      const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
-      unsigned int flags);
+  void Initialize(int thread_id,
+                  TimeTicks timestamp,
+                  ThreadTicks thread_timestamp,
+                  char phase,
+                  const unsigned char* category_group_enabled,
+                  const char* name,
+                  const char* scope,
+                  unsigned long long id,
+                  unsigned long long bind_id,
+                  int num_args,
+                  const char** arg_names,
+                  const unsigned char* arg_types,
+                  const unsigned long long* arg_values,
+                  std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
+                  unsigned int flags);
 
   void Reset();
 
@@ -135,12 +132,13 @@
   int thread_id() const { return thread_id_; }
   TimeDelta duration() const { return duration_; }
   TimeDelta thread_duration() const { return thread_duration_; }
+  const char* scope() const { return scope_; }
   unsigned long long id() const { return id_; }
   unsigned int flags() const { return flags_; }
 
   // Exposed for unittesting:
 
-  const base::RefCountedString* parameter_copy_storage() const {
+  const std::string* parameter_copy_storage() const {
     return parameter_copy_storage_.get();
   }
 
@@ -160,14 +158,16 @@
   ThreadTicks thread_timestamp_;
   TimeDelta duration_;
   TimeDelta thread_duration_;
-  // id_ can be used to store phase-specific data.
+  // scope_ and id_ can be used to store phase-specific data.
+  const char* scope_;
   unsigned long long id_;
   TraceValue arg_values_[kTraceMaxNumArgs];
   const char* arg_names_[kTraceMaxNumArgs];
-  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
+  std::unique_ptr<ConvertableToTraceFormat>
+      convertable_values_[kTraceMaxNumArgs];
   const unsigned char* category_group_enabled_;
   const char* name_;
-  scoped_refptr<base::RefCountedString> parameter_copy_storage_;
+  std::unique_ptr<std::string> parameter_copy_storage_;
   // Depending on TRACE_EVENT_FLAG_HAS_PROCESS_ID the event will have either:
   //  tid: thread_id_, pid: current_process_id (default case).
   //  tid: -1, pid: process_id_ (when flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID).
diff --git a/base/trace_event/trace_event_memory_overhead.cc b/base/trace_event/trace_event_memory_overhead.cc
index ba7207d..23579cb 100644
--- a/base/trace_event/trace_event_memory_overhead.cc
+++ b/base/trace_event/trace_event_memory_overhead.cc
@@ -104,7 +104,7 @@
       const ListValue* list_value = nullptr;
       value.GetAsList(&list_value);
       Add("ListValue", sizeof(ListValue));
-      for (const Value* v : *list_value)
+      for (const auto& v : *list_value)
         AddValue(*v);
     } break;
 
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index 09f2a91..ff8ec2d 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -2,11 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/trace_event/trace_event.h"
+
 #include <math.h>
 #include <stddef.h>
 #include <stdint.h>
 
 #include <cstdlib>
+#include <memory>
+#include <utility>
 
 #include "base/bind.h"
 #include "base/command_line.h"
@@ -15,7 +19,6 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted_memory.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/singleton.h"
 #include "base/process/process_handle.h"
 #include "base/single_thread_task_runner.h"
@@ -27,7 +30,6 @@
 #include "base/threading/thread.h"
 #include "base/time/time.h"
 #include "base/trace_event/trace_buffer.h"
-#include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_synthetic_delay.h"
 #include "base/values.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -95,14 +97,18 @@
   }
 
   void CancelTrace() {
-    WaitableEvent flush_complete_event(false, false);
+    WaitableEvent flush_complete_event(
+        WaitableEvent::ResetPolicy::AUTOMATIC,
+        WaitableEvent::InitialState::NOT_SIGNALED);
     CancelTraceAsync(&flush_complete_event);
     flush_complete_event.Wait();
   }
 
   void EndTraceAndFlush() {
     num_flush_callbacks_ = 0;
-    WaitableEvent flush_complete_event(false, false);
+    WaitableEvent flush_complete_event(
+        WaitableEvent::ResetPolicy::AUTOMATIC,
+        WaitableEvent::InitialState::NOT_SIGNALED);
     EndTraceAndFlushAsync(&flush_complete_event);
     flush_complete_event.Wait();
   }
@@ -110,7 +116,9 @@
   // Used when testing thread-local buffers which requires the thread initiating
   // flush to have a message loop.
   void EndTraceAndFlushInThreadWithMessageLoop() {
-    WaitableEvent flush_complete_event(false, false);
+    WaitableEvent flush_complete_event(
+        WaitableEvent::ResetPolicy::AUTOMATIC,
+        WaitableEvent::InitialState::NOT_SIGNALED);
     Thread flush_thread("flush");
     flush_thread.Start();
     flush_thread.task_runner()->PostTask(
@@ -134,19 +142,6 @@
                    base::Unretained(flush_complete_event)));
   }
 
-  void FlushMonitoring() {
-    WaitableEvent flush_complete_event(false, false);
-    FlushMonitoring(&flush_complete_event);
-    flush_complete_event.Wait();
-  }
-
-  void FlushMonitoring(WaitableEvent* flush_complete_event) {
-    TraceLog::GetInstance()->FlushButLeaveBufferIntact(
-        base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
-                   base::Unretained(static_cast<TraceEventTestFixture*>(this)),
-                   base::Unretained(flush_complete_event)));
-  }
-
   void SetUp() override {
     const char* name = PlatformThread::GetName();
     old_thread_name_ = name ? strdup(name) : NULL;
@@ -196,7 +191,7 @@
   trace_buffer_.AddFragment(events_str->data());
   trace_buffer_.Finish();
 
-  scoped_ptr<Value> root = base::JSONReader::Read(
+  std::unique_ptr<Value> root = base::JSONReader::Read(
       json_output_.json_output, JSON_PARSE_RFC | JSON_DETACHABLE_CHILDREN);
 
   if (!root.get()) {
@@ -209,9 +204,9 @@
 
   // Move items into our aggregate collection
   while (root_list->GetSize()) {
-    scoped_ptr<Value> item;
+    std::unique_ptr<Value> item;
     root_list->Remove(0, &item);
-    trace_parsed_.Append(item.release());
+    trace_parsed_.Append(std::move(item));
   }
 
   if (!has_more_events)
@@ -279,15 +274,15 @@
 }
 
 void TraceEventTestFixture::DropTracedMetadataRecords() {
-  scoped_ptr<ListValue> old_trace_parsed(trace_parsed_.DeepCopy());
+  std::unique_ptr<ListValue> old_trace_parsed(trace_parsed_.CreateDeepCopy());
   size_t old_trace_parsed_size = old_trace_parsed->GetSize();
   trace_parsed_.Clear();
 
   for (size_t i = 0; i < old_trace_parsed_size; i++) {
-    Value* value = NULL;
+    Value* value = nullptr;
     old_trace_parsed->Get(i, &value);
     if (!value || value->GetType() != Value::TYPE_DICTIONARY) {
-      trace_parsed_.Append(value->DeepCopy());
+      trace_parsed_.Append(value->CreateDeepCopy());
       continue;
     }
     DictionaryValue* dict = static_cast<DictionaryValue*>(value);
@@ -295,7 +290,7 @@
     if (dict->GetString("ph", &tmp) && tmp == "M")
       continue;
 
-    trace_parsed_.Append(value->DeepCopy());
+    trace_parsed_.Append(value->CreateDeepCopy());
   }
 }
 
@@ -501,8 +496,27 @@
                                               0x2128506);
     trackable.snapshot("world");
 
+    TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42));
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42), "hello");
+    TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42));
+
     TRACE_EVENT1(kControlCharacters, kControlCharacters,
                  kControlCharacters, kControlCharacters);
+
+    uint64_t context_id = 0x20151021;
+
+    TRACE_EVENT_ENTER_CONTEXT("all", "TRACE_EVENT_ENTER_CONTEXT call",
+                              TRACE_ID_WITH_SCOPE("scope", context_id));
+    TRACE_EVENT_LEAVE_CONTEXT("all", "TRACE_EVENT_LEAVE_CONTEXT call",
+                              TRACE_ID_WITH_SCOPE("scope", context_id));
+    TRACE_EVENT_SCOPED_CONTEXT("disabled-by-default-cat",
+                               "TRACE_EVENT_SCOPED_CONTEXT disabled call",
+                               context_id);
+    TRACE_EVENT_SCOPED_CONTEXT("all", "TRACE_EVENT_SCOPED_CONTEXT call",
+                               context_id);
   }  // Scope close causes TRACE_EVENT0 etc to send their END events.
 
   if (task_complete_event)
@@ -800,6 +814,7 @@
 
     EXPECT_TRUE((item && item->GetString("ph", &phase)));
     EXPECT_EQ("N", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
     EXPECT_TRUE((item && item->GetString("id", &id)));
     EXPECT_EQ("0x42", id);
 
@@ -807,6 +822,7 @@
     EXPECT_TRUE(item);
     EXPECT_TRUE(item && item->GetString("ph", &phase));
     EXPECT_EQ("O", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
     EXPECT_TRUE(item && item->GetString("id", &id));
     EXPECT_EQ("0x42", id);
     EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
@@ -816,6 +832,7 @@
     EXPECT_TRUE(item);
     EXPECT_TRUE(item && item->GetString("ph", &phase));
     EXPECT_EQ("D", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
     EXPECT_TRUE(item && item->GetString("id", &id));
     EXPECT_EQ("0x42", id);
   }
@@ -848,8 +865,98 @@
     EXPECT_EQ("0x2128506", id);
   }
 
+  EXPECT_FIND_("tracked object 3");
+  {
+    std::string phase;
+    std::string scope;
+    std::string id;
+    std::string snapshot;
+
+    EXPECT_TRUE((item && item->GetString("ph", &phase)));
+    EXPECT_EQ("N", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x42", id);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 3", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("O", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+    EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+    EXPECT_EQ("hello", snapshot);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 3", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("D", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+  }
+
   EXPECT_FIND_(kControlCharacters);
   EXPECT_SUB_FIND_(kControlCharacters);
+
+  EXPECT_FIND_("TRACE_EVENT_ENTER_CONTEXT call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("(", ph);
+
+    std::string scope;
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_LEAVE_CONTEXT call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ(")", ph);
+
+    std::string scope;
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  std::vector<const DictionaryValue*> scoped_context_calls =
+      FindTraceEntries(trace_parsed, "TRACE_EVENT_SCOPED_CONTEXT call");
+  EXPECT_EQ(2u, scoped_context_calls.size());
+  {
+    item = scoped_context_calls[0];
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("(", ph);
+
+    std::string id;
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  {
+    item = scoped_context_calls[1];
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ(")", ph);
+
+    std::string id;
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
 }
 
 void TraceManyInstantEvents(int thread_id, int num_events,
@@ -897,6 +1004,17 @@
   }
 }
 
+void CheckTraceDefaultCategoryFilters(const TraceLog& trace_log) {
+  // Default enables all category filters except the disabled-by-default-* ones.
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("bar"));
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled("foo,bar"));
+  EXPECT_TRUE(*trace_log.GetCategoryGroupEnabled(
+        "foo,disabled-by-default-foo"));
+  EXPECT_FALSE(*trace_log.GetCategoryGroupEnabled(
+        "disabled-by-default-foo,disabled-by-default-bar"));
+}
+
 }  // namespace
 
 // Simple Test for emitting data and validating it was received.
@@ -1138,55 +1256,41 @@
   class Convertable : public ConvertableToTraceFormat {
    public:
     explicit Convertable(int* num_calls) : num_calls_(num_calls) {}
+    ~Convertable() override {}
     void AppendAsTraceFormat(std::string* out) const override {
       (*num_calls_)++;
       out->append("\"metadata_value\"");
     }
 
    private:
-    ~Convertable() override {}
     int* num_calls_;
   };
 
-  scoped_refptr<ConvertableToTraceFormat> convertable =
-      new Convertable(&num_calls);
+  std::unique_ptr<ConvertableToTraceFormat> conv1(new Convertable(&num_calls));
+  std::unique_ptr<Convertable> conv2(new Convertable(&num_calls));
 
   BeginTrace();
-  TRACE_EVENT_API_ADD_METADATA_EVENT("metadata_event_name", "metadata_arg_name",
-                                     convertable);
-
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "metadata_event_1",
+      "metadata_arg_name", std::move(conv1));
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "metadata_event_2",
+      "metadata_arg_name", std::move(conv2));
   // |AppendAsTraceFormat| should only be called on flush, not when the event
   // is added.
   ASSERT_EQ(0, num_calls);
   EndTraceAndFlush();
-  ASSERT_EQ(1, num_calls);
-  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_name", "M",
+  ASSERT_EQ(2, num_calls);
+  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_1", "M",
+                                    "metadata_arg_name", "metadata_value"));
+  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_2", "M",
                                     "metadata_arg_name", "metadata_value"));
 
   // The metadata event should only be adde to the current trace. In this new
   // trace, the event should not appear.
   BeginTrace();
   EndTraceAndFlush();
-  ASSERT_EQ(1, num_calls);
-
-  // Flushing should cause |AppendAsTraceFormat| to be called, but if the buffer
-  // is left intact, it the flush at the end of the trace should still call it;
-  // the metadata event should not be removed.
-  TraceLog::GetInstance()->SetEnabled(
-      TraceConfig(kRecordAllCategoryFilter,
-                  "record-until-full,enable-sampling"),
-      TraceLog::MONITORING_MODE);
-  TRACE_EVENT_API_ADD_METADATA_EVENT("metadata_event_name", "metadata_arg_name",
-                                     convertable);
-  FlushMonitoring();
   ASSERT_EQ(2, num_calls);
-
-  // Flushing the trace at this point will case |AppendAsTraceFormat| to be
-  // called twice: once for the event that was added by the monitoring flush,
-  // and once for the end trace flush; the metadata event will be duplicated.
-  // This is consistent with the other metadata events.
-  EndTraceAndFlush();
-  ASSERT_EQ(4, num_calls);
 }
 
 // Test that categories work.
@@ -1460,14 +1564,16 @@
     // Test that string arguments are copied.
     TraceEventHandle handle1 =
         trace_event_internal::AddTraceEvent(
-            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1", 0, 0,
-            trace_event_internal::kNoId,
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
             "arg1", std::string("argval"), "arg2", std::string("argval"));
     // Test that static TRACE_STR_COPY string arguments are copied.
     TraceEventHandle handle2 =
         trace_event_internal::AddTraceEvent(
-            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
-            trace_event_internal::kNoId,
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
             "arg1", TRACE_STR_COPY("argval"),
             "arg2", TRACE_STR_COPY("argval"));
     EXPECT_GT(tracer->GetStatus().event_count, 1u);
@@ -1489,16 +1595,18 @@
     // Test that static literal string arguments are not copied.
     TraceEventHandle handle1 =
         trace_event_internal::AddTraceEvent(
-            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1", 0, 0,
-            trace_event_internal::kNoId,
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
             "arg1", "argval", "arg2", "argval");
     // Test that static TRACE_STR_COPY NULL string arguments are not copied.
     const char* str1 = NULL;
     const char* str2 = NULL;
     TraceEventHandle handle2 =
         trace_event_internal::AddTraceEvent(
-            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
-            trace_event_internal::kNoId,
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
             "arg1", TRACE_STR_COPY(str1),
             "arg2", TRACE_STR_COPY(str2));
     EXPECT_GT(tracer->GetStatus().event_count, 1u);
@@ -1519,7 +1627,8 @@
   BeginTrace();
 
   Thread thread("1");
-  WaitableEvent task_complete_event(false, false);
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
   thread.Start();
 
   thread.task_runner()->PostTask(
@@ -1541,7 +1650,9 @@
   WaitableEvent* task_complete_events[num_threads];
   for (int i = 0; i < num_threads; i++) {
     threads[i] = new Thread(StringPrintf("Thread %d", i));
-    task_complete_events[i] = new WaitableEvent(false, false);
+    task_complete_events[i] =
+        new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                          WaitableEvent::InitialState::NOT_SIGNALED);
     threads[i]->Start();
     threads[i]->task_runner()->PostTask(
         FROM_HERE, base::Bind(&TraceManyInstantEvents, i, num_events,
@@ -1588,7 +1699,9 @@
   // Now run some trace code on these threads.
   WaitableEvent* task_complete_events[kNumThreads];
   for (int i = 0; i < kNumThreads; i++) {
-    task_complete_events[i] = new WaitableEvent(false, false);
+    task_complete_events[i] =
+        new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
+                          WaitableEvent::InitialState::NOT_SIGNALED);
     threads[i]->Start();
     thread_ids[i] = threads[i]->GetThreadId();
     threads[i]->task_runner()->PostTask(
@@ -1861,7 +1974,7 @@
   EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
   EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
   EXPECT_STREQ(
-    "-*Debug,-*Test",
+    "",
     trace_log->GetCurrentTraceConfig().ToCategoryFilterString().c_str());
   trace_log->SetDisabled();
   trace_log->SetDisabled();
@@ -1898,6 +2011,48 @@
   trace_log->SetDisabled();
 }
 
+TEST_F(TraceEventTestFixture, TraceWithDefaultCategoryFilters) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+
+  trace_log->SetEnabled(TraceConfig(), TraceLog::RECORDING_MODE);
+  CheckTraceDefaultCategoryFilters(*trace_log);
+  trace_log->SetDisabled();
+
+  trace_log->SetEnabled(TraceConfig("", ""), TraceLog::RECORDING_MODE);
+  CheckTraceDefaultCategoryFilters(*trace_log);
+  trace_log->SetDisabled();
+
+  trace_log->SetEnabled(TraceConfig("*", ""), TraceLog::RECORDING_MODE);
+  CheckTraceDefaultCategoryFilters(*trace_log);
+  trace_log->SetDisabled();
+
+  trace_log->SetEnabled(TraceConfig(""), TraceLog::RECORDING_MODE);
+  CheckTraceDefaultCategoryFilters(*trace_log);
+  trace_log->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
+  TraceLog* trace_log = TraceLog::GetInstance();
+
+  trace_log->SetEnabled(TraceConfig("foo,disabled-by-default-foo", ""),
+                        TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+  trace_log->SetDisabled();
+
+  // Enabling only the disabled-by-default-* category means the default ones
+  // are also enabled.
+  trace_log->SetEnabled(TraceConfig("disabled-by-default-foo", ""),
+                        TraceLog::RECORDING_MODE);
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+  EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+  EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-bar"));
+  trace_log->SetDisabled();
+}
+
 TEST_F(TraceEventTestFixture, TraceSampling) {
   TraceLog::GetInstance()->SetEnabled(
     TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
@@ -1948,62 +2103,16 @@
   EndTraceAndFlush();
 }
 
-TEST_F(TraceEventTestFixture, TraceContinuousSampling) {
-  TraceLog::GetInstance()->SetEnabled(
-    TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
-    TraceLog::MONITORING_MODE);
-
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "AAA");
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "BBB");
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
-  FlushMonitoring();
-
-  // Make sure we can get the profiled data.
-  EXPECT_TRUE(FindNamePhase("AAA", "P"));
-  EXPECT_TRUE(FindNamePhase("BBB", "P"));
-
-  Clear();
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "CCC");
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "DDD");
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
-  FlushMonitoring();
-
-  // Make sure the profiled data is accumulated.
-  EXPECT_TRUE(FindNamePhase("AAA", "P"));
-  EXPECT_TRUE(FindNamePhase("BBB", "P"));
-  EXPECT_TRUE(FindNamePhase("CCC", "P"));
-  EXPECT_TRUE(FindNamePhase("DDD", "P"));
-
-  Clear();
-
-  TraceLog::GetInstance()->SetDisabled();
-
-  // Make sure disabling the continuous sampling thread clears
-  // the profiled data.
-  EXPECT_FALSE(FindNamePhase("AAA", "P"));
-  EXPECT_FALSE(FindNamePhase("BBB", "P"));
-  EXPECT_FALSE(FindNamePhase("CCC", "P"));
-  EXPECT_FALSE(FindNamePhase("DDD", "P"));
-
-  Clear();
-}
-
 class MyData : public ConvertableToTraceFormat {
  public:
   MyData() {}
+  ~MyData() override {}
 
   void AppendAsTraceFormat(std::string* out) const override {
     out->append("{\"foo\":1}");
   }
 
  private:
-  ~MyData() override {}
   DISALLOW_COPY_AND_ASSIGN(MyData);
 };
 
@@ -2011,31 +2120,26 @@
   TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
                                       TraceLog::RECORDING_MODE);
 
-  scoped_refptr<ConvertableToTraceFormat> data(new MyData());
-  scoped_refptr<ConvertableToTraceFormat> data1(new MyData());
-  scoped_refptr<ConvertableToTraceFormat> data2(new MyData());
-  TRACE_EVENT1("foo", "bar", "data", data);
-  TRACE_EVENT2("foo", "baz",
-               "data1", data1,
-               "data2", data2);
+  std::unique_ptr<ConvertableToTraceFormat> data(new MyData());
+  std::unique_ptr<ConvertableToTraceFormat> data1(new MyData());
+  std::unique_ptr<ConvertableToTraceFormat> data2(new MyData());
+  TRACE_EVENT1("foo", "bar", "data", std::move(data));
+  TRACE_EVENT2("foo", "baz", "data1", std::move(data1), "data2",
+               std::move(data2));
 
-
-  scoped_refptr<ConvertableToTraceFormat> convertData1(new MyData());
-  scoped_refptr<ConvertableToTraceFormat> convertData2(new MyData());
-  TRACE_EVENT2(
-      "foo",
-      "string_first",
-      "str",
-      "string value 1",
-      "convert",
-      convertData1);
-  TRACE_EVENT2(
-      "foo",
-      "string_second",
-      "convert",
-      convertData2,
-      "str",
-      "string value 2");
+  // Check that a std::unique_ptr<DerivedClassOfConvertable> is properly
+  // treated as convertable and not accidentally cast to bool when passed
+  // as a trace event argument.
+  std::unique_ptr<MyData> convertData1(new MyData());
+  std::unique_ptr<MyData> convertData2(new MyData());
+  std::unique_ptr<MyData> convertData3(new MyData());
+  std::unique_ptr<MyData> convertData4(new MyData());
+  TRACE_EVENT2("foo", "string_first", "str", "string value 1", "convert",
+               std::move(convertData1));
+  TRACE_EVENT2("foo", "string_second", "convert", std::move(convertData2),
+               "str", "string value 2");
+  TRACE_EVENT2("foo", "both_conv", "convert1", std::move(convertData3),
+               "convert2", std::move(convertData4));
   EndTraceAndFlush();
 
   // One arg version.
@@ -2110,6 +2214,21 @@
   ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
   EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
   EXPECT_EQ(1, foo_val);
+
+  dict = FindNamePhase("both_conv", "X");
+  ASSERT_TRUE(dict);
+
+  args_dict = NULL;
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+
+  value = NULL;
+  convertable_dict = NULL;
+  foo_val = 0;
+  EXPECT_TRUE(args_dict->Get("convert1", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+  EXPECT_TRUE(args_dict->Get("convert2", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
 }
 
 TEST_F(TraceEventTestFixture, PrimitiveArgs) {
@@ -2276,6 +2395,16 @@
   EXPECT_EQ(1, int_value);
 }
 
+TEST_F(TraceEventTestFixture, NameIsEscaped) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT0("category", "name\\with\\backspaces");
+  EndTraceAndFlush();
+
+  EXPECT_TRUE(FindMatchingValue("cat", "category"));
+  EXPECT_TRUE(FindMatchingValue("name", "name\\with\\backspaces"));
+}
+
 namespace {
 
 bool IsArgNameWhitelisted(const char* arg_name) {
@@ -2403,6 +2532,7 @@
                        char phase,
                        const unsigned char* category_group_enabled,
                        const char* name,
+                       const char* scope,
                        unsigned long long id,
                        int num_args,
                        const char* const arg_names[],
@@ -2633,7 +2763,8 @@
   size_t chunk_index;
   EXPECT_EQ(0u, buffer->Size());
 
-  scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[num_chunks]);
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[num_chunks]);
   for (size_t i = 0; i < num_chunks; ++i) {
     chunks[i] = buffer->GetChunk(&chunk_index).release();
     EXPECT_TRUE(chunks[i]);
@@ -2649,7 +2780,7 @@
 
   // Return all chunks in original order.
   for (size_t i = 0; i < num_chunks; ++i)
-    buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
 
   // Should recycle the chunks in the returned order.
   for (size_t i = 0; i < num_chunks; ++i) {
@@ -2662,9 +2793,8 @@
 
   // Return all chunks in reverse order.
   for (size_t i = 0; i < num_chunks; ++i) {
-    buffer->ReturnChunk(
-        num_chunks - i - 1,
-        scoped_ptr<TraceBufferChunk>(chunks[num_chunks - i - 1]));
+    buffer->ReturnChunk(num_chunks - i - 1, std::unique_ptr<TraceBufferChunk>(
+                                                chunks[num_chunks - i - 1]));
   }
 
   // Should recycle the chunks in the returned order.
@@ -2677,7 +2807,7 @@
   }
 
   for (size_t i = 0; i < num_chunks; ++i)
-    buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
 
   TraceLog::GetInstance()->SetDisabled();
 }
@@ -2694,7 +2824,8 @@
   EXPECT_FALSE(buffer->NextChunk());
 
   size_t half_chunks = num_chunks / 2;
-  scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[half_chunks]);
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[half_chunks]);
 
   for (size_t i = 0; i < half_chunks; ++i) {
     chunks[i] = buffer->GetChunk(&chunk_index).release();
@@ -2702,7 +2833,7 @@
     EXPECT_EQ(i, chunk_index);
   }
   for (size_t i = 0; i < half_chunks; ++i)
-    buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
 
   for (size_t i = 0; i < half_chunks; ++i)
     EXPECT_EQ(chunks[i], buffer->NextChunk());
@@ -2721,7 +2852,8 @@
   EXPECT_EQ(0u, buffer->Size());
   EXPECT_FALSE(buffer->NextChunk());
 
-  scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[num_chunks]);
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[num_chunks]);
 
   for (size_t i = 0; i < num_chunks; ++i) {
     chunks[i] = buffer->GetChunk(&chunk_index).release();
@@ -2729,7 +2861,7 @@
     EXPECT_EQ(i, chunk_index);
   }
   for (size_t i = 0; i < num_chunks; ++i)
-    buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
 
   for (size_t i = 0; i < num_chunks; ++i)
     EXPECT_TRUE(chunks[i] == buffer->NextChunk());
@@ -2756,7 +2888,8 @@
   BeginTrace();
 
   Thread thread("1");
-  WaitableEvent task_complete_event(false, false);
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
   thread.Start();
   thread.task_runner()->PostTask(
       FROM_HERE, Bind(&TraceLog::SetCurrentThreadBlocksMessageLoop,
@@ -2766,8 +2899,10 @@
       FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
   task_complete_event.Wait();
 
-  WaitableEvent task_start_event(false, false);
-  WaitableEvent task_stop_event(false, false);
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
   thread.task_runner()->PostTask(
       FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
   task_start_event.Wait();
@@ -2828,15 +2963,18 @@
   BeginTrace();
 
   Thread thread("1");
-  WaitableEvent task_complete_event(false, false);
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
   thread.Start();
 
   thread.task_runner()->PostTask(
       FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
   task_complete_event.Wait();
 
-  WaitableEvent task_start_event(false, false);
-  WaitableEvent task_stop_event(false, false);
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
   thread.task_runner()->PostTask(
       FROM_HERE, Bind(&SetBlockingFlagAndBlockUntilStopped, &task_start_event,
                       &task_stop_event));
@@ -2853,7 +2991,8 @@
   BeginTrace();
 
   Thread thread("1");
-  WaitableEvent task_complete_event(false, false);
+  WaitableEvent task_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                    WaitableEvent::InitialState::NOT_SIGNALED);
   thread.Start();
 
   thread.task_runner()->PostTask(
@@ -2861,8 +3000,10 @@
   task_complete_event.Wait();
   task_complete_event.Reset();
 
-  WaitableEvent task_start_event(false, false);
-  WaitableEvent task_stop_event(false, false);
+  WaitableEvent task_start_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                 WaitableEvent::InitialState::NOT_SIGNALED);
+  WaitableEvent task_stop_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                WaitableEvent::InitialState::NOT_SIGNALED);
   thread.task_runner()->PostTask(
       FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
   task_start_event.Wait();
@@ -3032,5 +3173,12 @@
   EXPECT_EQ(filter, config.ToCategoryFilterString());
 }
 
+TEST_F(TraceEventTestFixture, ClockSyncEventsAreAlwaysAddedToTrace) {
+  BeginSpecificTrace("-*");
+  TRACE_EVENT_CLOCK_SYNC_RECEIVER(1);
+  EndTraceAndFlush();
+  EXPECT_TRUE(FindNamePhase("clock_sync", "c"));
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 17f6b66..12cebc6 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -6,15 +6,17 @@
 
 #include <algorithm>
 #include <cmath>
+#include <memory>
 #include <utility>
 
 #include "base/base_switches.h"
 #include "base/bind.h"
 #include "base/command_line.h"
+#include "base/debug/leak_annotations.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ref_counted_memory.h"
 #include "base/memory/singleton.h"
 #include "base/process/process_metrics.h"
 #include "base/stl_util.h"
@@ -22,11 +24,13 @@
 #include "base/strings/string_tokenizer.h"
 #include "base/strings/stringprintf.h"
 #include "base/sys_info.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/threading/worker_pool.h"
 #include "base/time/time.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/memory_dump_provider.h"
@@ -76,15 +80,13 @@
     "Too many vector buffer chunks");
 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
 
-// Can store results for 30 seconds with 1 ms sampling interval.
-const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
 
 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
 const int kThreadFlushTimeoutMs = 3000;
 
-#define MAX_CATEGORY_GROUPS 100
+#define MAX_CATEGORY_GROUPS 200
 
 // Parallel arrays g_category_groups and g_category_group_enabled are separate
 // so that a pointer to a member of g_category_group_enabled can be easily
@@ -138,6 +140,7 @@
       TRACE_EVENT_PHASE_METADATA,
       &g_category_group_enabled[g_category_metadata],
       metadata_name,
+      trace_event_internal::kGlobalScope,  // scope
       trace_event_internal::kNoId,  // id
       trace_event_internal::kNoId,  // bind_id
       num_args,
@@ -239,7 +242,7 @@
   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
   // as long as the thread exists.
   TraceLog* trace_log_;
-  scoped_ptr<TraceBufferChunk> chunk_;
+  std::unique_ptr<TraceBufferChunk> chunk_;
   size_t chunk_index_;
   int generation_;
 
@@ -305,9 +308,8 @@
   delete this;
 }
 
-bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(
-    const MemoryDumpArgs& /* args */,
-    ProcessMemoryDump* pmd) {
+bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs&,
+                                                    ProcessMemoryDump* pmd) {
   if (!chunk_)
     return true;
   std::string dump_base_name = StringPrintf(
@@ -331,6 +333,15 @@
   // find the generation mismatch and delete this buffer soon.
 }
 
+struct TraceLog::RegisteredAsyncObserver {
+  explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
+      : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
+  ~RegisteredAsyncObserver() {}
+
+  WeakPtr<AsyncEnabledStateObserver> observer;
+  scoped_refptr<SequencedTaskRunner> task_runner;
+};
+
 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
 
 TraceLogStatus::~TraceLogStatus() {}
@@ -364,6 +375,10 @@
   // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
   //                            sizeof(g_category_group_enabled),
   //                           "trace_event category enabled");
+  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
+    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
+                         "trace_event category enabled");
+  }
 #if defined(OS_NACL)  // NaCl shouldn't expose the process id.
   SetProcessID(0);
 #else
@@ -386,7 +401,8 @@
   // trace events will be added into the main buffer directly.
   if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
     return;
-  auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+  HEAP_PROFILER_SCOPED_IGNORE;
+  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
   if (thread_local_event_buffer &&
       !CheckGeneration(thread_local_event_buffer->generation())) {
     delete thread_local_event_buffer;
@@ -398,8 +414,7 @@
   }
 }
 
-bool TraceLog::OnMemoryDump(const MemoryDumpArgs& /* args */,
-                            ProcessMemoryDump* pmd) {
+bool TraceLog::OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump* pmd) {
   // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
   // (crbug.com/499731).
   TraceEventMemoryOverhead overhead;
@@ -447,14 +462,15 @@
   unsigned char enabled_flag = 0;
   const char* category_group = g_category_groups[category_index];
   if (mode_ == RECORDING_MODE &&
-      trace_config_.IsCategoryGroupEnabled(category_group))
+      trace_config_.IsCategoryGroupEnabled(category_group)) {
     enabled_flag |= ENABLED_FOR_RECORDING;
-  else if (mode_ == MONITORING_MODE &&
-           trace_config_.IsCategoryGroupEnabled(category_group))
-    enabled_flag |= ENABLED_FOR_MONITORING;
+  }
+
   if (event_callback_ &&
-      event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
+      event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) {
     enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+  }
+
 #if defined(OS_WIN)
   if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
           category_group)) {
@@ -462,6 +478,12 @@
   }
 #endif
 
+  // TODO(primiano): this is a temporary workaround for catapult:#2341,
+  // to guarantee that metadata events are always added even if the category
+  // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+  if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
+    enabled_flag |= ENABLED_FOR_RECORDING;
+
   g_category_group_enabled[category_index] = enabled_flag;
 }
 
@@ -535,6 +557,7 @@
     // category groups with strings not known at compile time (this is
     // required by SetWatchEvent).
     const char* new_group = strdup(category_group);
+    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
     g_category_groups[category_index] = new_group;
     DCHECK(!g_category_group_enabled[category_index]);
     // Note that if both included and excluded patterns in the
@@ -561,6 +584,7 @@
 
 void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
   std::vector<EnabledStateObserver*> observer_list;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
   {
     AutoLock lock(lock_);
 
@@ -625,10 +649,16 @@
 
     dispatching_to_observer_list_ = true;
     observer_list = enabled_state_observer_list_;
+    observer_map = async_observers_;
   }
   // Notify observers outside the lock in case they trigger trace events.
   for (size_t i = 0; i < observer_list.size(); ++i)
     observer_list[i]->OnTraceLogEnabled();
+  for (const auto& it : observer_map) {
+    it.second.task_runner->PostTask(
+        FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled,
+                        it.second.observer));
+  }
 
   {
     AutoLock lock(lock_);
@@ -710,6 +740,8 @@
   dispatching_to_observer_list_ = true;
   std::vector<EnabledStateObserver*> observer_list =
       enabled_state_observer_list_;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map =
+      async_observers_;
 
   {
     // Dispatch to observers outside the lock in case the observer triggers a
@@ -717,6 +749,11 @@
     AutoUnlock unlock(lock_);
     for (size_t i = 0; i < observer_list.size(); ++i)
       observer_list[i]->OnTraceLogDisabled();
+    for (const auto& it : observer_map) {
+      it.second.task_runner->PostTask(
+          FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled,
+                          it.second.observer));
+    }
   }
   dispatching_to_observer_list_ = false;
 }
@@ -750,8 +787,8 @@
 TraceLogStatus TraceLog::GetStatus() const {
   AutoLock lock(lock_);
   TraceLogStatus result;
-  result.event_capacity = logged_events_->Capacity();
-  result.event_count = logged_events_->Size();
+  result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
+  result.event_count = static_cast<uint32_t>(logged_events_->Size());
   return result;
 }
 
@@ -859,7 +896,7 @@
     flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
                              ? ThreadTaskRunnerHandle::Get()
                              : nullptr;
-    DCHECK(!thread_message_loops_.size() || flush_task_runner_);
+    DCHECK(thread_message_loops_.empty() || flush_task_runner_);
     flush_output_callback_ = cb;
 
     if (thread_shared_chunk_) {
@@ -894,12 +931,13 @@
 
 // Usually it runs on a different thread.
 void TraceLog::ConvertTraceEventsToTraceFormat(
-    scoped_ptr<TraceBuffer> logged_events,
+    std::unique_ptr<TraceBuffer> logged_events,
     const OutputCallback& flush_output_callback,
     const ArgumentFilterPredicate& argument_filter_predicate) {
   if (flush_output_callback.is_null())
     return;
 
+  HEAP_PROFILER_SCOPED_IGNORE;
   // The callback need to be called at least once even if there is no events
   // to let the caller know the completion of flush.
   scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
@@ -920,7 +958,7 @@
 }
 
 void TraceLog::FinishFlush(int generation, bool discard_events) {
-  scoped_ptr<TraceBuffer> previous_logged_events;
+  std::unique_ptr<TraceBuffer> previous_logged_events;
   OutputCallback flush_output_callback;
   ArgumentFilterPredicate argument_filter_predicate;
 
@@ -1005,37 +1043,12 @@
     for (hash_set<MessageLoop*>::const_iterator it =
              thread_message_loops_.begin();
          it != thread_message_loops_.end(); ++it) {
-      LOG(WARNING) << "Thread: " << (*it)->thread_name();
+      LOG(WARNING) << "Thread: " << (*it)->GetThreadName();
     }
   }
   FinishFlush(generation, discard_events);
 }
 
-void TraceLog::FlushButLeaveBufferIntact(
-    const TraceLog::OutputCallback& flush_output_callback) {
-  scoped_ptr<TraceBuffer> previous_logged_events;
-  ArgumentFilterPredicate argument_filter_predicate;
-  {
-    AutoLock lock(lock_);
-    AddMetadataEventsWhileLocked();
-    if (thread_shared_chunk_) {
-      // Return the chunk to the main buffer to flush the sampling data.
-      logged_events_->ReturnChunk(thread_shared_chunk_index_,
-                                  std::move(thread_shared_chunk_));
-    }
-    previous_logged_events = logged_events_->CloneForIteration();
-
-    if (trace_options() & kInternalEnableArgumentFilter) {
-      CHECK(!argument_filter_predicate_.is_null());
-      argument_filter_predicate = argument_filter_predicate_;
-    }
-  }  // release lock
-
-  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
-                                  flush_output_callback,
-                                  argument_filter_predicate);
-}
-
 void TraceLog::UseNextTraceBuffer() {
   logged_events_.reset(CreateTraceBuffer());
   subtle::NoBarrier_AtomicIncrement(&generation_, 1);
@@ -1047,12 +1060,13 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
@@ -1060,6 +1074,7 @@
       phase,
       category_group_enabled,
       name,
+      scope,
       id,
       trace_event_internal::kNoId,  // bind_id
       thread_id,
@@ -1076,13 +1091,14 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned long long bind_id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
@@ -1090,6 +1106,7 @@
       phase,
       category_group_enabled,
       name,
+      scope,
       id,
       bind_id,
       thread_id,
@@ -1106,19 +1123,21 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int process_id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
       phase,
       category_group_enabled,
       name,
+      scope,
       id,
       trace_event_internal::kNoId,  // bind_id
       process_id,
@@ -1137,6 +1156,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const TimeTicks& timestamp,
@@ -1144,12 +1164,13 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   return AddTraceEventWithThreadIdAndTimestamp(
       phase,
       category_group_enabled,
       name,
+      scope,
       id,
       trace_event_internal::kNoId,  // bind_id
       thread_id,
@@ -1166,6 +1187,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned long long bind_id,
     int thread_id,
@@ -1174,7 +1196,7 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   TraceEventHandle handle = {0, 0, 0};
   if (!*category_group_enabled)
@@ -1204,7 +1226,7 @@
   // |thread_local_event_buffer_| can be null if the current thread doesn't have
   // a message loop or the message loop is blocked.
   InitializeThreadLocalEventBufferIfSupported();
-  auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+  auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
 
   // Check and update the current thread name only if the event is for the
   // current thread to avoid locks in most cases.
@@ -1253,8 +1275,7 @@
 #endif  // OS_WIN
 
   std::string console_message;
-  if (*category_group_enabled &
-      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
+  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
     OptionalAutoLock lock(&lock_);
 
     TraceEvent* trace_event = NULL;
@@ -1272,6 +1293,7 @@
                               phase,
                               category_group_enabled,
                               name,
+                              scope,
                               id,
                               bind_id,
                               num_args,
@@ -1318,39 +1340,52 @@
       event_callback(
           offset_event_timestamp,
           phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
-          category_group_enabled, name, id, num_args, arg_names, arg_types,
-          arg_values, flags);
+          category_group_enabled, name, scope, id, num_args, arg_names,
+          arg_types, arg_values, flags);
     }
   }
 
-  if (base::trace_event::AllocationContextTracker::capture_enabled()) {
-    if (phase == TRACE_EVENT_PHASE_BEGIN || phase == TRACE_EVENT_PHASE_COMPLETE)
-      base::trace_event::AllocationContextTracker::PushPseudoStackFrame(name);
-    else if (phase == TRACE_EVENT_PHASE_END)
-      // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
-      // is in |TraceLog::UpdateTraceEventDuration|.
-      base::trace_event::AllocationContextTracker::PopPseudoStackFrame(name);
+  // TODO(primiano): Add support for events with copied name crbug.com/581078
+  if (!(flags & TRACE_EVENT_FLAG_COPY)) {
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
+      if (phase == TRACE_EVENT_PHASE_BEGIN ||
+          phase == TRACE_EVENT_PHASE_COMPLETE) {
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->PushPseudoStackFrame(name);
+      } else if (phase == TRACE_EVENT_PHASE_END) {
+        // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
+        // is in |TraceLog::UpdateTraceEventDuration|.
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->PopPseudoStackFrame(name);
+      }
+    }
   }
 
   return handle;
 }
 
 void TraceLog::AddMetadataEvent(
+    const unsigned char* category_group_enabled,
     const char* name,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
-  scoped_ptr<TraceEvent> trace_event(new TraceEvent);
+  HEAP_PROFILER_SCOPED_IGNORE;
+  std::unique_ptr<TraceEvent> trace_event(new TraceEvent);
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  ThreadTicks thread_now = ThreadNow();
+  TimeTicks now = OffsetNow();
   AutoLock lock(lock_);
   trace_event->Initialize(
-      0,  // thread_id
-      TimeTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA,
-      &g_category_group_enabled[g_category_metadata], name,
-      trace_event_internal::kNoId,  // id
-      trace_event_internal::kNoId,  // bind_id
+      thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
+      category_group_enabled, name,
+      trace_event_internal::kGlobalScope,  // scope
+      trace_event_internal::kNoId,         // id
+      trace_event_internal::kNoId,         // bind_id
       num_args, arg_names, arg_types, arg_values, convertable_values, flags);
   metadata_events_.push_back(std::move(trace_event));
 }
@@ -1360,6 +1395,7 @@
 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                             const TimeTicks& timestamp,
                                             TraceEvent* trace_event) {
+  HEAP_PROFILER_SCOPED_IGNORE;
   AutoLock thread_info_lock(thread_info_lock_);
 
   // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
@@ -1446,9 +1482,11 @@
           EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
     }
 
-    if (base::trace_event::AllocationContextTracker::capture_enabled()) {
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
       // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
-      base::trace_event::AllocationContextTracker::PopPseudoStackFrame(name);
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopPseudoStackFrame(name);
     }
   }
 
@@ -1459,9 +1497,10 @@
     EventCallback event_callback = reinterpret_cast<EventCallback>(
         subtle::NoBarrier_Load(&event_callback_));
     if (event_callback) {
-      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
-                     trace_event_internal::kNoId, 0,
-                     nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
+      event_callback(
+        now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
+        trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
+        nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
     }
   }
 }
@@ -1492,9 +1531,12 @@
 void TraceLog::AddMetadataEventsWhileLocked() {
   lock_.AssertAcquired();
 
-  // Copy metadata added by |AddMetadataEvent| into the trace log.
-  for (const scoped_ptr<TraceEvent>& event : metadata_events_)
-    AddEventToThreadSharedChunkWhileLocked(nullptr, false)->CopyFrom(*event);
+  // Move metadata added by |AddMetadataEvent| into the trace log.
+  while (!metadata_events_.empty()) {
+    TraceEvent* event = AddEventToThreadSharedChunkWhileLocked(nullptr, false);
+    event->MoveFrom(std::move(metadata_events_.back()));
+    metadata_events_.pop_back();
+  }
 
 #if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
   InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
@@ -1658,13 +1700,11 @@
 }
 
 TraceBuffer* TraceLog::CreateTraceBuffer() {
+  HEAP_PROFILER_SCOPED_IGNORE;
   InternalTraceOptions options = trace_options();
   if (options & kInternalRecordContinuously)
     return TraceBuffer::CreateTraceBufferRingBuffer(
         kTraceEventRingBufferChunks);
-  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
-    return TraceBuffer::CreateTraceBufferRingBuffer(
-        kMonitorTraceEventBufferChunks);
   else if (options & kInternalEchoToConsole)
     return TraceBuffer::CreateTraceBufferRingBuffer(
         kEchoToConsoleTraceEventBufferChunks);
@@ -1699,6 +1739,25 @@
   overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
 }
 
+void TraceLog::AddAsyncEnabledStateObserver(
+    WeakPtr<AsyncEnabledStateObserver> listener) {
+  AutoLock lock(lock_);
+  async_observers_.insert(
+      std::make_pair(listener.get(), RegisteredAsyncObserver(listener)));
+}
+
+void TraceLog::RemoveAsyncEnabledStateObserver(
+    AsyncEnabledStateObserver* listener) {
+  AutoLock lock(lock_);
+  async_observers_.erase(listener);
+}
+
+bool TraceLog::HasAsyncEnabledStateObserver(
+    AsyncEnabledStateObserver* listener) const {
+  AutoLock lock(lock_);
+  return ContainsKey(async_observers_, listener);
+}
+
 }  // namespace trace_event
 }  // namespace base
 
@@ -1719,6 +1778,7 @@
             TRACE_EVENT_PHASE_COMPLETE,
             category_group_enabled_,
             name,
+            trace_event_internal::kGlobalScope,  // scope
             trace_event_internal::kNoId,  // id
             static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
             base::TimeTicks::Now(),
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
index a079f04..e4407e8 100644
--- a/base/trace_event/trace_log.h
+++ b/base/trace_event/trace_log.h
@@ -8,6 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -15,7 +16,6 @@
 #include "base/containers/hash_tables.h"
 #include "base/gtest_prod_util.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/scoped_vector.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/trace_config.h"
@@ -39,16 +39,15 @@
 struct BASE_EXPORT TraceLogStatus {
   TraceLogStatus();
   ~TraceLogStatus();
-  size_t event_capacity;
-  size_t event_count;
+  uint32_t event_capacity;
+  uint32_t event_count;
 };
 
 class BASE_EXPORT TraceLog : public MemoryDumpProvider {
  public:
   enum Mode {
     DISABLED = 0,
-    RECORDING_MODE,
-    MONITORING_MODE,
+    RECORDING_MODE
   };
 
   // The pointer returned from GetCategoryGroupEnabledInternal() points to a
@@ -58,8 +57,6 @@
   enum CategoryGroupEnabledFlags {
     // Category group enabled for the recording mode.
     ENABLED_FOR_RECORDING = 1 << 0,
-    // Category group enabled for the monitoring mode.
-    ENABLED_FOR_MONITORING = 1 << 1,
     // Category group enabled by SetEventCallbackEnabled().
     ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
     // Category group enabled to export events to ETW.
@@ -122,6 +119,28 @@
   void RemoveEnabledStateObserver(EnabledStateObserver* listener);
   bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
 
+  // Asynchronous enabled state listeners. When tracing is enabled or disabled,
+  // for each observer, a task for invoking its appropriate callback is posted
+  // to the thread from which AddAsyncEnabledStateObserver() was called. This
+  // allows the observer to be safely destroyed, provided that it happens on the
+  // same thread that invoked AddAsyncEnabledStateObserver().
+  class BASE_EXPORT AsyncEnabledStateObserver {
+   public:
+    virtual ~AsyncEnabledStateObserver() = default;
+
+    // Posted just after the tracing system becomes enabled, outside |lock_|.
+    // TraceLog::IsEnabled() is true at this point.
+    virtual void OnTraceLogEnabled() = 0;
+
+    // Posted just after the tracing system becomes disabled, outside |lock_|.
+    // TraceLog::IsEnabled() is false at this point.
+    virtual void OnTraceLogDisabled() = 0;
+  };
+  void AddAsyncEnabledStateObserver(
+      WeakPtr<AsyncEnabledStateObserver> listener);
+  void RemoveAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener);
+  bool HasAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener) const;
+
   TraceLogStatus GetStatus() const;
   bool BufferIsFull() const;
 
@@ -142,6 +161,7 @@
                                 char phase,
                                 const unsigned char* category_group_enabled,
                                 const char* name,
+                                const char* scope,
                                 unsigned long long id,
                                 int num_args,
                                 const char* const arg_names[],
@@ -169,7 +189,6 @@
   typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                               bool has_more_events)> OutputCallback;
   void Flush(const OutputCallback& cb, bool use_worker_thread = false);
-  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
 
   // Cancels tracing and discards collected data.
   void CancelTracing(const OutputCallback& cb);
@@ -188,41 +207,45 @@
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       int num_args,
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithBindId(
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       unsigned long long bind_id,
       int num_args,
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithProcessId(
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       int process_id,
       int num_args,
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       int thread_id,
       const TimeTicks& timestamp,
@@ -230,12 +253,13 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       unsigned long long bind_id,
       int thread_id,
@@ -244,17 +268,18 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
 
   // Adds a metadata event that will be written when the trace log is flushed.
   void AddMetadataEvent(
+      const unsigned char* category_group_enabled,
       const char* name,
       int num_args,
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
 
   void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
@@ -362,6 +387,7 @@
 
   class ThreadLocalEventBuffer;
   class OptionalAutoLock;
+  struct RegisteredAsyncObserver;
 
   TraceLog();
   ~TraceLog() override;
@@ -397,7 +423,7 @@
   void FlushCurrentThread(int generation, bool discard_events);
   // Usually it runs on a different thread.
   static void ConvertTraceEventsToTraceFormat(
-      scoped_ptr<TraceBuffer> logged_events,
+      std::unique_ptr<TraceBuffer> logged_events,
       const TraceLog::OutputCallback& flush_output_callback,
       const ArgumentFilterPredicate& argument_filter_predicate);
   void FinishFlush(int generation, bool discard_events);
@@ -434,11 +460,13 @@
   Lock thread_info_lock_;
   Mode mode_;
   int num_traces_recorded_;
-  scoped_ptr<TraceBuffer> logged_events_;
-  std::vector<scoped_ptr<TraceEvent>> metadata_events_;
+  std::unique_ptr<TraceBuffer> logged_events_;
+  std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
   subtle::AtomicWord /* EventCallback */ event_callback_;
   bool dispatching_to_observer_list_;
   std::vector<EnabledStateObserver*> enabled_state_observer_list_;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
+      async_observers_;
 
   std::string process_name_;
   base::hash_map<int, std::string> process_labels_;
@@ -467,7 +495,7 @@
   subtle::AtomicWord /* Options */ trace_options_;
 
   // Sampling thread handles.
-  scoped_ptr<TraceSamplingThread> sampling_thread_;
+  std::unique_ptr<TraceSamplingThread> sampling_thread_;
   PlatformThreadHandle sampling_thread_handle_;
 
   TraceConfig trace_config_;
@@ -484,7 +512,7 @@
 
   // For events which can't be added into the thread local buffer, e.g. events
   // from threads without a message loop.
-  scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
+  std::unique_ptr<TraceBufferChunk> thread_shared_chunk_;
   size_t thread_shared_chunk_index_;
 
   // Set when asynchronous Flush is in progress.
diff --git a/base/trace_event/trace_sampling_thread.cc b/base/trace_event/trace_sampling_thread.cc
index ec4602c..5a0d2f8 100644
--- a/base/trace_event/trace_sampling_thread.cc
+++ b/base/trace_event/trace_sampling_thread.cc
@@ -4,6 +4,7 @@
 
 #include <stddef.h>
 
+#include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_impl.h"
 #include "base/trace_event/trace_log.h"
 #include "base/trace_event/trace_sampling_thread.h"
@@ -24,7 +25,9 @@
 };
 
 TraceSamplingThread::TraceSamplingThread()
-    : thread_running_(false), waitable_event_for_testing_(false, false) {}
+    : thread_running_(false),
+      waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
+                                  WaitableEvent::InitialState::NOT_SIGNALED) {}
 
 TraceSamplingThread::~TraceSamplingThread() {}
 
@@ -54,8 +57,9 @@
   ExtractCategoryAndName(combined, &category_group, &name);
   TRACE_EVENT_API_ADD_TRACE_EVENT(
       TRACE_EVENT_PHASE_SAMPLE,
-      TraceLog::GetCategoryGroupEnabled(category_group), name, 0, 0, NULL, NULL,
-      NULL, NULL, 0);
+      TraceLog::GetCategoryGroupEnabled(category_group), name,
+      trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
+      NULL, NULL, NULL, NULL, 0);
 }
 
 void TraceSamplingThread::GetSamples() {
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 8953554..487fd19 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -11,13 +11,14 @@
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/compiler_specific.h"
+#include "base/debug/leak_annotations.h"
 #include "base/logging.h"
 #include "base/process/process_handle.h"
-#include "base/profiler/alternate_timer.h"
 #include "base/strings/stringprintf.h"
+#include "base/third_party/valgrind/memcheck.h"
+#include "base/threading/worker_pool.h"
 #include "base/tracking_info.h"
 #include "build/build_config.h"
-#include "third_party/valgrind/memcheck.h"
 
 using base::TimeDelta;
 
@@ -36,15 +37,6 @@
 // this state may prevail for much or all of the process lifetime.
 const ThreadData::Status kInitialStartupState = ThreadData::PROFILING_ACTIVE;
 
-// Control whether an alternate time source (Now() function) is supported by
-// the ThreadData class.  This compile time flag should be set to true if we
-// want other modules (such as a memory allocator, or a thread-specific CPU time
-// clock) to be able to provide a thread-specific Now() function.  Without this
-// compile-time flag, the code will only support the wall-clock time.  This flag
-// can be flipped to efficiently disable this path (if there is a performance
-// problem with its presence).
-static const bool kAllowAlternateTimeSourceHandling = true;
-
 // Possible states of the profiler timing enabledness.
 enum {
   UNDEFINED_TIMING,
@@ -284,10 +276,7 @@
 // to them.
 
 // static
-NowFunction* ThreadData::now_function_ = NULL;
-
-// static
-bool ThreadData::now_function_is_time_ = false;
+ThreadData::NowFunction* ThreadData::now_function_for_testing_ = NULL;
 
 // A TLS slot which points to the ThreadData instance for the current thread.
 // We do a fake initialization here (zeroing out data), and then the real
@@ -367,7 +356,9 @@
 
 // static
 void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
-  Initialize();
+  if (base::WorkerPool::RunsTasksOnCurrentThread())
+    return;
+  EnsureTlsInitialization();
   ThreadData* current_thread_data =
       reinterpret_cast<ThreadData*>(tls_index_.Get());
   if (current_thread_data)
@@ -515,16 +506,6 @@
   random_number_ ^=
       static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));
 
-  // We don't have queue durations without OS timer.  OS timer is automatically
-  // used for task-post-timing, so the use of an alternate timer implies all
-  // queue times are invalid, unless it was explicitly said that we can trust
-  // the alternate timer.
-  if (kAllowAlternateTimeSourceHandling &&
-      now_function_ &&
-      !now_function_is_time_) {
-    queue_duration = 0;
-  }
-
   DeathMap::iterator it = death_map_.find(&births);
   DeathData* death_data;
   if (it != death_map_.end()) {
@@ -691,13 +672,7 @@
   }
 }
 
-static void OptionallyInitializeAlternateTimer() {
-  NowFunction* alternate_time_source = GetAlternateTimeSource();
-  if (alternate_time_source)
-    ThreadData::SetAlternateTimeSource(alternate_time_source);
-}
-
-void ThreadData::Initialize() {
+void ThreadData::EnsureTlsInitialization() {
   if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
     return;  // Someone else did the initialization.
   // Due to racy lazy initialization in tests, we'll need to recheck status_
@@ -710,13 +685,6 @@
   if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
     return;  // Someone raced in here and beat us.
 
-  // Put an alternate timer in place if the environment calls for it, such as
-  // for tracking TCMalloc allocations.  This insertion is idempotent, so we
-  // don't mind if there is a race, and we'd prefer not to be in a lock while
-  // doing this work.
-  if (kAllowAlternateTimeSourceHandling)
-    OptionallyInitializeAlternateTimer();
-
   // Perform the "real" TLS initialization now, and leave it intact through
   // process termination.
   if (!tls_index_.initialized()) {  // Testing may have initialized this.
@@ -744,7 +712,7 @@
   DCHECK_GE(status, DEACTIVATED);
   DCHECK_LE(status, PROFILING_ACTIVE);
 
-  Initialize();  // No-op if already initialized.
+  EnsureTlsInitialization();  // No-op if already initialized.
 
   if (status > DEACTIVATED)
     status = PROFILING_ACTIVE;
@@ -762,29 +730,21 @@
 }
 
 // static
-void ThreadData::SetAlternateTimeSource(NowFunction* now_function) {
-  DCHECK(now_function);
-  if (kAllowAlternateTimeSourceHandling)
-    now_function_ = now_function;
-}
-
-// static
 void ThreadData::EnableProfilerTiming() {
   base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING);
 }
 
 // static
 TrackedTime ThreadData::Now() {
-  if (kAllowAlternateTimeSourceHandling && now_function_)
-    return TrackedTime::FromMilliseconds((*now_function_)());
+  if (now_function_for_testing_)
+    return TrackedTime::FromMilliseconds((*now_function_for_testing_)());
   if (IsProfilerTimingEnabled() && TrackingStatus())
     return TrackedTime::Now();
   return TrackedTime();  // Super fast when disabled, or not compiled.
 }
 
 // static
-void ThreadData::EnsureCleanupWasCalled(
-    int /* major_threads_shutdown_count */) {
+void ThreadData::EnsureCleanupWasCalled(int /*major_threads_shutdown_count*/) {
   base::AutoLock lock(*list_lock_.Pointer());
   if (worker_thread_data_creation_count_ == 0)
     return;  // We haven't really run much, and couldn't have leaked.
@@ -833,6 +793,7 @@
   if (leak) {
     ThreadData* thread_data = thread_data_list;
     while (thread_data) {
+      ANNOTATE_LEAKING_OBJECT_PTR(thread_data);
       thread_data = thread_data->next();
     }
     return;
@@ -993,6 +954,9 @@
 ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() {
 }
 
+ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot(
+    const ProcessDataPhaseSnapshot& other) = default;
+
 ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() {
 }
 
@@ -1007,6 +971,9 @@
 #endif
 }
 
+ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
+    default;
+
 ProcessDataSnapshot::~ProcessDataSnapshot() {
 }
 
diff --git a/base/tracked_objects.h b/base/tracked_objects.h
index 1a00ec0..7ef0317 100644
--- a/base/tracked_objects.h
+++ b/base/tracked_objects.h
@@ -22,7 +22,6 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/process/process_handle.h"
-#include "base/profiler/alternate_timer.h"
 #include "base/profiler/tracked_time.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/thread_checker.h"
@@ -175,7 +174,7 @@
 // (worker threads don't have message loops generally, and hence gathering from
 // them will continue to be asynchronous).  We had an implementation of this in
 // the past, but the difficulty is dealing with message loops being terminated.
-// We can *try* to spam the available threads via some message loop proxy to
+// We can *try* to spam the available threads via some task runner to
 // achieve this feat, and it *might* be valuable when we are collecting data
 // for upload via UMA (where correctness of data may be more significant than
 // for a single screen of about:profiler).
@@ -515,7 +514,7 @@
 
   // Initializes all statics if needed (this initialization call should be made
   // while we are single threaded).
-  static void Initialize();
+  static void EnsureTlsInitialization();
 
   // Sets internal status_.
   // If |status| is false, then status_ is set to DEACTIVATED.
@@ -537,12 +536,6 @@
   // the code).
   static TrackedTime Now();
 
-  // Use the function |now| to provide current times, instead of calling the
-  // TrackedTime::Now() function.  Since this alternate function is being used,
-  // the other time arguments (used for calculating queueing delay) will be
-  // ignored.
-  static void SetAlternateTimeSource(NowFunction* now);
-
   // This function can be called at process termination to validate that thread
   // cleanup routines have been called for at least some number of named
   // threads.
@@ -559,8 +552,10 @@
   FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
   FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);
 
-  typedef std::map<const BirthOnThread*, int> BirthCountMap;
+  // Type for an alternate timer function (testing only).
+  typedef unsigned int NowFunction();
 
+  typedef std::map<const BirthOnThread*, int> BirthCountMap;
   typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
       DeathsSnapshot;
 
@@ -635,11 +630,7 @@
 
   // When non-null, this specifies an external function that supplies monotone
   // increasing time functcion.
-  static NowFunction* now_function_;
-
-  // If true, now_function_ returns values that can be used to calculate queue
-  // time.
-  static bool now_function_is_time_;
+  static NowFunction* now_function_for_testing_;
 
   // We use thread local store to identify which ThreadData to interact with.
   static base::ThreadLocalStorage::StaticSlot tls_index_;
@@ -804,6 +795,7 @@
 struct BASE_EXPORT ProcessDataPhaseSnapshot {
  public:
   ProcessDataPhaseSnapshot();
+  ProcessDataPhaseSnapshot(const ProcessDataPhaseSnapshot& other);
   ~ProcessDataPhaseSnapshot();
 
   std::vector<TaskSnapshot> tasks;
@@ -816,6 +808,7 @@
 struct BASE_EXPORT ProcessDataSnapshot {
  public:
   ProcessDataSnapshot();
+  ProcessDataSnapshot(const ProcessDataSnapshot& other);
   ~ProcessDataSnapshot();
 
   PhasedProcessDataSnapshotMap phased_snapshots;
diff --git a/base/tracked_objects_unittest.cc b/base/tracked_objects_unittest.cc
index 69dd85e..70d9601 100644
--- a/base/tracked_objects_unittest.cc
+++ b/base/tracked_objects_unittest.cc
@@ -9,7 +9,8 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "base/process/process_handle.h"
 #include "base/time/time.h"
 #include "base/tracking_info.h"
@@ -31,8 +32,7 @@
     ThreadData::ShutdownSingleThreadedCleanup(true);
 
     test_time_ = 0;
-    ThreadData::SetAlternateTimeSource(&TrackedObjectsTest::GetTestTime);
-    ThreadData::now_function_is_time_ = true;
+    ThreadData::now_function_for_testing_ = &TrackedObjectsTest::GetTestTime;
   }
 
   ~TrackedObjectsTest() override {
@@ -240,7 +240,7 @@
 TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
   ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
 
-  scoped_ptr<DeathData> data(new DeathData());
+  std::unique_ptr<DeathData> data(new DeathData());
   ASSERT_NE(data, nullptr);
   EXPECT_EQ(data->run_duration_sum(), 0);
   EXPECT_EQ(data->run_duration_max(), 0);
@@ -279,7 +279,7 @@
 TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
   ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
 
-  scoped_ptr<DeathData> data(new DeathData());
+  std::unique_ptr<DeathData> data(new DeathData());
   ASSERT_NE(data, nullptr);
 
   int32_t run_ms = 42;
diff --git a/base/tuple.h b/base/tuple.h
index e5872cc..e82f2e5 100644
--- a/base/tuple.h
+++ b/base/tuple.h
@@ -2,33 +2,31 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// A Tuple is a generic templatized container, similar in concept to std::pair
-// and std::tuple.  The convenient MakeTuple() function takes any number of
-// arguments and will construct and return the appropriate Tuple object.  The
-// functions DispatchToMethod and DispatchToFunction take a function pointer or
-// instance and method pointer, and unpack a tuple into arguments to the call.
-//
-// Tuple elements are copied by value, and stored in the tuple.  See the unit
-// tests for more details of how/when the values are copied.
+// Use std::tuple as tuple type. This file contains helper functions for
+// working with std::tuples.
+// The functions DispatchToMethod and DispatchToFunction take a function pointer
+// or instance and method pointer, and unpack a tuple into arguments to the
+// call.
 //
 // Example usage:
 //   // These two methods of creating a Tuple are identical.
-//   Tuple<int, const char*> tuple_a(1, "wee");
-//   Tuple<int, const char*> tuple_b = MakeTuple(1, "wee");
+//   std::tuple<int, const char*> tuple_a(1, "wee");
+//   std::tuple<int, const char*> tuple_b = std::make_tuple(1, "wee");
 //
 //   void SomeFunc(int a, const char* b) { }
 //   DispatchToFunction(&SomeFunc, tuple_a);  // SomeFunc(1, "wee")
 //   DispatchToFunction(
-//       &SomeFunc, MakeTuple(10, "foo"));    // SomeFunc(10, "foo")
+//       &SomeFunc, std::make_tuple(10, "foo"));    // SomeFunc(10, "foo")
 //
 //   struct { void SomeMeth(int a, int b, int c) { } } foo;
-//   DispatchToMethod(&foo, &Foo::SomeMeth, MakeTuple(1, 2, 3));
+//   DispatchToMethod(&foo, &Foo::SomeMeth, std::make_tuple(1, 2, 3));
 //   // foo->SomeMeth(1, 2, 3);
 
 #ifndef BASE_TUPLE_H_
 #define BASE_TUPLE_H_
 
 #include <stddef.h>
+#include <tuple>
 
 #include "base/bind_helpers.h"
 #include "build/build_config.h"
@@ -93,7 +91,7 @@
   using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11,12>;
 };
 
-#else  // defined(WIN) && defined(_PREFAST_)
+#else  // defined(OS_WIN) && defined(_PREFAST_)
 
 template <size_t... Ns>
 struct MakeIndexSequenceImpl<0, Ns...> {
@@ -104,135 +102,25 @@
 struct MakeIndexSequenceImpl<N, Ns...>
     : MakeIndexSequenceImpl<N - 1, N - 1, Ns...> {};
 
-#endif  // defined(WIN) && defined(_PREFAST_)
+#endif  // defined(OS_WIN) && defined(_PREFAST_)
+
+// std::get() in <=libstdc++-4.6 returns an lvalue-reference for
+// rvalue-reference of a tuple, where an rvalue-reference is expected.
+template <size_t I, typename... Ts>
+typename std::tuple_element<I, std::tuple<Ts...>>::type&& get(
+    std::tuple<Ts...>&& t) {
+  using ElemType = typename std::tuple_element<I, std::tuple<Ts...>>::type;
+  return std::forward<ElemType>(std::get<I>(t));
+}
+
+template <size_t I, typename T>
+auto get(T& t) -> decltype(std::get<I>(t)) {
+  return std::get<I>(t);
+}
 
 template <size_t N>
 using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
 
-// Traits ----------------------------------------------------------------------
-//
-// A simple traits class for tuple arguments.
-//
-// ValueType: the bare, nonref version of a type (same as the type for nonrefs).
-// RefType: the ref version of a type (same as the type for refs).
-// ParamType: what type to pass to functions (refs should not be constified).
-
-template <class P>
-struct TupleTraits {
-  typedef P ValueType;
-  typedef P& RefType;
-  typedef const P& ParamType;
-};
-
-template <class P>
-struct TupleTraits<P&> {
-  typedef P ValueType;
-  typedef P& RefType;
-  typedef P& ParamType;
-};
-
-// Tuple -----------------------------------------------------------------------
-//
-// This set of classes is useful for bundling 0 or more heterogeneous data types
-// into a single variable.  The advantage of this is that it greatly simplifies
-// function objects that need to take an arbitrary number of parameters; see
-// RunnableMethod and IPC::MessageWithTuple.
-//
-// Tuple<> is supplied to act as a 'void' type.  It can be used, for example,
-// when dispatching to a function that accepts no arguments (see the
-// Dispatchers below).
-// Tuple<A> is rarely useful.  One such use is when A is non-const ref that you
-// want filled by the dispatchee, and the tuple is merely a container for that
-// output (a "tier").  See MakeRefTuple and its usages.
-
-template <typename IxSeq, typename... Ts>
-struct TupleBaseImpl;
-template <typename... Ts>
-using TupleBase = TupleBaseImpl<MakeIndexSequence<sizeof...(Ts)>, Ts...>;
-template <size_t N, typename T>
-struct TupleLeaf;
-
-template <typename... Ts>
-struct Tuple final : TupleBase<Ts...> {
-  Tuple() : TupleBase<Ts...>() {}
-  explicit Tuple(typename TupleTraits<Ts>::ParamType... args)
-      : TupleBase<Ts...>(args...) {}
-};
-
-// Avoids ambiguity between Tuple's two constructors.
-template <>
-struct Tuple<> final {};
-
-template <size_t... Ns, typename... Ts>
-struct TupleBaseImpl<IndexSequence<Ns...>, Ts...> : TupleLeaf<Ns, Ts>... {
-  TupleBaseImpl() : TupleLeaf<Ns, Ts>()... {}
-  explicit TupleBaseImpl(typename TupleTraits<Ts>::ParamType... args)
-      : TupleLeaf<Ns, Ts>(args)... {}
-};
-
-template <size_t N, typename T>
-struct TupleLeaf {
-  TupleLeaf() {}
-  explicit TupleLeaf(typename TupleTraits<T>::ParamType x) : x(x) {}
-
-  T& get() { return x; }
-  const T& get() const { return x; }
-
-  T x;
-};
-
-// Tuple getters --------------------------------------------------------------
-//
-// Allows accessing an arbitrary tuple element by index.
-//
-// Example usage:
-//   base::Tuple<int, double> t2;
-//   base::get<0>(t2) = 42;
-//   base::get<1>(t2) = 3.14;
-
-template <size_t I, typename T>
-T& get(TupleLeaf<I, T>& leaf) {
-  return leaf.get();
-}
-
-template <size_t I, typename T>
-const T& get(const TupleLeaf<I, T>& leaf) {
-  return leaf.get();
-}
-
-// Tuple types ----------------------------------------------------------------
-//
-// Allows for selection of ValueTuple/RefTuple/ParamTuple without needing the
-// definitions of class types the tuple takes as parameters.
-
-template <typename T>
-struct TupleTypes;
-
-template <typename... Ts>
-struct TupleTypes<Tuple<Ts...>> {
-  using ValueTuple = Tuple<typename TupleTraits<Ts>::ValueType...>;
-  using RefTuple = Tuple<typename TupleTraits<Ts>::RefType...>;
-  using ParamTuple = Tuple<typename TupleTraits<Ts>::ParamType...>;
-};
-
-// Tuple creators -------------------------------------------------------------
-//
-// Helper functions for constructing tuples while inferring the template
-// argument types.
-
-template <typename... Ts>
-inline Tuple<Ts...> MakeTuple(const Ts&... arg) {
-  return Tuple<Ts...>(arg...);
-}
-
-// The following set of helpers make what Boost refers to as "Tiers" - a tuple
-// of references.
-
-template <typename... Ts>
-inline Tuple<Ts&...> MakeRefTuple(Ts&... arg) {
-  return Tuple<Ts&...>(arg...);
-}
-
 // Dispatchers ----------------------------------------------------------------
 //
 // Helper functions that call the given method on an object, with the unpacked
@@ -245,17 +133,17 @@
 // Non-Static Dispatchers with no out params.
 
 template <typename ObjT, typename Method, typename... Ts, size_t... Ns>
-inline void DispatchToMethodImpl(ObjT* obj,
+inline void DispatchToMethodImpl(const ObjT& obj,
                                  Method method,
-                                 const Tuple<Ts...>& arg,
+                                 const std::tuple<Ts...>& arg,
                                  IndexSequence<Ns...>) {
-  (obj->*method)(base::internal::UnwrapTraits<Ts>::Unwrap(get<Ns>(arg))...);
+  (obj->*method)(internal::Unwrap(std::get<Ns>(arg))...);
 }
 
 template <typename ObjT, typename Method, typename... Ts>
-inline void DispatchToMethod(ObjT* obj,
+inline void DispatchToMethod(const ObjT& obj,
                              Method method,
-                             const Tuple<Ts...>& arg) {
+                             const std::tuple<Ts...>& arg) {
   DispatchToMethodImpl(obj, method, arg, MakeIndexSequence<sizeof...(Ts)>());
 }
 
@@ -263,13 +151,14 @@
 
 template <typename Function, typename... Ts, size_t... Ns>
 inline void DispatchToFunctionImpl(Function function,
-                                   const Tuple<Ts...>& arg,
+                                   const std::tuple<Ts...>& arg,
                                    IndexSequence<Ns...>) {
-  (*function)(base::internal::UnwrapTraits<Ts>::Unwrap(get<Ns>(arg))...);
+  (*function)(internal::Unwrap(std::get<Ns>(arg))...);
 }
 
 template <typename Function, typename... Ts>
-inline void DispatchToFunction(Function function, const Tuple<Ts...>& arg) {
+inline void DispatchToFunction(Function function,
+                               const std::tuple<Ts...>& arg) {
   DispatchToFunctionImpl(function, arg, MakeIndexSequence<sizeof...(Ts)>());
 }
 
@@ -281,21 +170,21 @@
           typename... OutTs,
           size_t... InNs,
           size_t... OutNs>
-inline void DispatchToMethodImpl(ObjT* obj,
+inline void DispatchToMethodImpl(const ObjT& obj,
                                  Method method,
-                                 const Tuple<InTs...>& in,
-                                 Tuple<OutTs...>* out,
+                                 const std::tuple<InTs...>& in,
+                                 std::tuple<OutTs...>* out,
                                  IndexSequence<InNs...>,
                                  IndexSequence<OutNs...>) {
-  (obj->*method)(base::internal::UnwrapTraits<InTs>::Unwrap(get<InNs>(in))...,
-                 &get<OutNs>(*out)...);
+  (obj->*method)(internal::Unwrap(std::get<InNs>(in))...,
+                 &std::get<OutNs>(*out)...);
 }
 
 template <typename ObjT, typename Method, typename... InTs, typename... OutTs>
-inline void DispatchToMethod(ObjT* obj,
+inline void DispatchToMethod(const ObjT& obj,
                              Method method,
-                             const Tuple<InTs...>& in,
-                             Tuple<OutTs...>* out) {
+                             const std::tuple<InTs...>& in,
+                             std::tuple<OutTs...>* out) {
   DispatchToMethodImpl(obj, method, in, out,
                        MakeIndexSequence<sizeof...(InTs)>(),
                        MakeIndexSequence<sizeof...(OutTs)>());
diff --git a/base/tuple_unittest.cc b/base/tuple_unittest.cc
index 55a9139..6f90c29 100644
--- a/base/tuple_unittest.cc
+++ b/base/tuple_unittest.cc
@@ -32,51 +32,34 @@
 }  // namespace
 
 TEST(TupleTest, Basic) {
-  base::Tuple<> t0 = base::MakeTuple();
+  std::tuple<> t0 = std::make_tuple();
   ALLOW_UNUSED_LOCAL(t0);
-  base::Tuple<int> t1(1);
-  base::Tuple<int, const char*> t2 =
-      base::MakeTuple(1, static_cast<const char*>("wee"));
-  base::Tuple<int, int, int> t3(1, 2, 3);
-  base::Tuple<int, int, int, int*> t4(1, 2, 3, &get<0>(t1));
-  base::Tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &get<0>(t4));
-  base::Tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &get<0>(t4));
+  std::tuple<int> t1(1);
+  std::tuple<int, const char*> t2 =
+      std::make_tuple(1, static_cast<const char*>("wee"));
+  ALLOW_UNUSED_LOCAL(t2);
+  std::tuple<int, int, int> t3(1, 2, 3);
+  std::tuple<int, int, int, int*> t4(1, 2, 3, &std::get<0>(t1));
+  std::tuple<int, int, int, int, int*> t5(1, 2, 3, 4, &std::get<0>(t4));
+  std::tuple<int, int, int, int, int, int*> t6(1, 2, 3, 4, 5, &std::get<0>(t4));
 
-  EXPECT_EQ(1, get<0>(t1));
-  EXPECT_EQ(1, get<0>(t2));
-  EXPECT_EQ(1, get<0>(t3));
-  EXPECT_EQ(2, get<1>(t3));
-  EXPECT_EQ(3, get<2>(t3));
-  EXPECT_EQ(1, get<0>(t4));
-  EXPECT_EQ(2, get<1>(t4));
-  EXPECT_EQ(3, get<2>(t4));
-  EXPECT_EQ(1, get<0>(t5));
-  EXPECT_EQ(2, get<1>(t5));
-  EXPECT_EQ(3, get<2>(t5));
-  EXPECT_EQ(4, get<3>(t5));
-  EXPECT_EQ(1, get<0>(t6));
-  EXPECT_EQ(2, get<1>(t6));
-  EXPECT_EQ(3, get<2>(t6));
-  EXPECT_EQ(4, get<3>(t6));
-  EXPECT_EQ(5, get<4>(t6));
-
-  EXPECT_EQ(1, get<0>(t1));
+  EXPECT_EQ(1, std::get<0>(t1));
   DispatchToFunction(&DoAdd, t4);
-  EXPECT_EQ(6, get<0>(t1));
+  EXPECT_EQ(6, std::get<0>(t1));
 
   int res = 0;
-  DispatchToFunction(&DoAdd, base::MakeTuple(9, 8, 7, &res));
+  DispatchToFunction(&DoAdd, std::make_tuple(9, 8, 7, &res));
   EXPECT_EQ(24, res);
 
   Addy addy;
-  EXPECT_EQ(1, get<0>(t4));
+  EXPECT_EQ(1, std::get<0>(t4));
   DispatchToMethod(&addy, &Addy::DoAdd, t5);
-  EXPECT_EQ(10, get<0>(t4));
+  EXPECT_EQ(10, std::get<0>(t4));
 
   Addz addz;
-  EXPECT_EQ(10, get<0>(t4));
+  EXPECT_EQ(10, std::get<0>(t4));
   DispatchToMethod(&addz, &Addz::DoAdd, t6);
-  EXPECT_EQ(15, get<0>(t4));
+  EXPECT_EQ(15, std::get<0>(t4));
 }
 
 namespace {
@@ -111,8 +94,8 @@
   bool res = false;
 
   // Creating the tuple should copy the class to store internally in the tuple.
-  base::Tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
-  get<1>(tuple) = &get<0>(tuple);
+  std::tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
+  std::get<1>(tuple) = &std::get<0>(tuple);
   EXPECT_EQ(2, CopyLogger::TimesConstructed);
   EXPECT_EQ(1, CopyLogger::TimesCopied);
 
@@ -131,4 +114,30 @@
   EXPECT_EQ(2, CopyLogger::TimesCopied);
 }
 
+TEST(TupleTest, Get) {
+  int i = 1;
+  int j = 2;
+  std::tuple<int, int&, int&&> t(3, i, std::move(j));
+  EXPECT_TRUE((std::is_same<int&, decltype(base::get<0>(t))>::value));
+  EXPECT_EQ(3, base::get<0>(t));
+
+  EXPECT_TRUE((std::is_same<int&, decltype(base::get<1>(t))>::value));
+  EXPECT_EQ(1, base::get<1>(t));
+
+  EXPECT_TRUE((std::is_same<int&, decltype(base::get<2>(t))>::value));
+  EXPECT_EQ(2, base::get<2>(t));
+
+  EXPECT_TRUE((std::is_same<int&&,
+               decltype(base::get<0>(std::move(t)))>::value));
+  EXPECT_EQ(3, base::get<0>(std::move(t)));
+
+  EXPECT_TRUE((std::is_same<int&,
+               decltype(base::get<1>(std::move(t)))>::value));
+  EXPECT_EQ(1, base::get<1>(std::move(t)));
+
+  EXPECT_TRUE((std::is_same<int&&,
+               decltype(base::get<2>(std::move(t)))>::value));
+  EXPECT_EQ(2, base::get<2>(std::move(t)));
+}
+
 }  // namespace base
diff --git a/base/values.cc b/base/values.cc
index 3f32b5e..d579699 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -13,7 +13,7 @@
 
 #include "base/json/json_writer.h"
 #include "base/logging.h"
-#include "base/move.h"
+#include "base/memory/ptr_util.h"
 #include "base/strings/string_util.h"
 #include "base/strings/utf_string_conversions.h"
 
@@ -21,15 +21,15 @@
 
 namespace {
 
-scoped_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
+std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
 
 // Make a deep copy of |node|, but don't include empty lists or dictionaries
 // in the copy. It's possible for this function to return NULL and it
 // expects |node| to always be non-NULL.
-scoped_ptr<ListValue> CopyListWithoutEmptyChildren(const ListValue& list) {
-  scoped_ptr<ListValue> copy;
-  for (ListValue::const_iterator it = list.begin(); it != list.end(); ++it) {
-    scoped_ptr<Value> child_copy = CopyWithoutEmptyChildren(**it);
+std::unique_ptr<ListValue> CopyListWithoutEmptyChildren(const ListValue& list) {
+  std::unique_ptr<ListValue> copy;
+  for (const auto& entry : list) {
+    std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(*entry);
     if (child_copy) {
       if (!copy)
         copy.reset(new ListValue);
@@ -39,11 +39,11 @@
   return copy;
 }
 
-scoped_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
+std::unique_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
     const DictionaryValue& dict) {
-  scoped_ptr<DictionaryValue> copy;
+  std::unique_ptr<DictionaryValue> copy;
   for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
-    scoped_ptr<Value> child_copy = CopyWithoutEmptyChildren(it.value());
+    std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(it.value());
     if (child_copy) {
       if (!copy)
         copy.reset(new DictionaryValue);
@@ -53,7 +53,7 @@
   return copy;
 }
 
-scoped_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
+std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
   switch (node.GetType()) {
     case Value::TYPE_LIST:
       return CopyListWithoutEmptyChildren(static_cast<const ListValue&>(node));
@@ -67,73 +67,57 @@
   }
 }
 
-// A small functor for comparing Values for std::find_if and similar.
-class ValueEquals {
- public:
-  // Pass the value against which all consecutive calls of the () operator will
-  // compare their argument to. This Value object must not be destroyed while
-  // the ValueEquals is  in use.
-  explicit ValueEquals(const Value* first) : first_(first) { }
-
-  bool operator ()(const Value* second) const {
-    return first_->Equals(second);
-  }
-
- private:
-  const Value* first_;
-};
-
 }  // namespace
 
 Value::~Value() {
 }
 
 // static
-scoped_ptr<Value> Value::CreateNullValue() {
-  return make_scoped_ptr(new Value(TYPE_NULL));
+std::unique_ptr<Value> Value::CreateNullValue() {
+  return WrapUnique(new Value(TYPE_NULL));
 }
 
-bool Value::GetAsBinary(const BinaryValue** /* out_value */) const {
+bool Value::GetAsBinary(const BinaryValue**) const {
   return false;
 }
 
-bool Value::GetAsBoolean(bool* /* out_value */) const {
+bool Value::GetAsBoolean(bool*) const {
   return false;
 }
 
-bool Value::GetAsInteger(int* /* out_value */) const {
+bool Value::GetAsInteger(int*) const {
   return false;
 }
 
-bool Value::GetAsDouble(double* /* out_value */) const {
+bool Value::GetAsDouble(double*) const {
   return false;
 }
 
-bool Value::GetAsString(std::string* /* out_value */) const {
+bool Value::GetAsString(std::string*) const {
   return false;
 }
 
-bool Value::GetAsString(string16* /* out_value */) const {
+bool Value::GetAsString(string16*) const {
   return false;
 }
 
-bool Value::GetAsString(const StringValue** /* out_value */) const {
+bool Value::GetAsString(const StringValue**) const {
   return false;
 }
 
-bool Value::GetAsList(ListValue** /* out_value */) {
+bool Value::GetAsList(ListValue**) {
   return false;
 }
 
-bool Value::GetAsList(const ListValue** /* out_value */) const {
+bool Value::GetAsList(const ListValue**) const {
   return false;
 }
 
-bool Value::GetAsDictionary(DictionaryValue** /* out_value */) {
+bool Value::GetAsDictionary(DictionaryValue**) {
   return false;
 }
 
-bool Value::GetAsDictionary(const DictionaryValue** /* out_value */) const {
+bool Value::GetAsDictionary(const DictionaryValue**) const {
   return false;
 }
 
@@ -144,8 +128,8 @@
   return CreateNullValue().release();
 }
 
-scoped_ptr<Value> Value::CreateDeepCopy() const {
-  return make_scoped_ptr(DeepCopy());
+std::unique_ptr<Value> Value::CreateDeepCopy() const {
+  return WrapUnique(DeepCopy());
 }
 
 bool Value::Equals(const Value* other) const {
@@ -313,19 +297,19 @@
       size_(0) {
 }
 
-BinaryValue::BinaryValue(scoped_ptr<char[]> buffer, size_t size)
+BinaryValue::BinaryValue(std::unique_ptr<char[]> buffer, size_t size)
     : Value(TYPE_BINARY), buffer_(std::move(buffer)), size_(size) {}
 
 BinaryValue::~BinaryValue() {
 }
 
 // static
-BinaryValue* BinaryValue::CreateWithCopiedBuffer(const char* buffer,
-                                                 size_t size) {
-  char* buffer_copy = new char[size];
-  memcpy(buffer_copy, buffer, size);
-  scoped_ptr<char[]> scoped_buffer_copy(buffer_copy);
-  return new BinaryValue(std::move(scoped_buffer_copy), size);
+std::unique_ptr<BinaryValue> BinaryValue::CreateWithCopiedBuffer(
+    const char* buffer,
+    size_t size) {
+  std::unique_ptr<char[]> buffer_copy(new char[size]);
+  memcpy(buffer_copy.get(), buffer, size);
+  return base::MakeUnique<BinaryValue>(std::move(buffer_copy), size);
 }
 
 bool BinaryValue::GetAsBinary(const BinaryValue** out_value) const {
@@ -335,7 +319,7 @@
 }
 
 BinaryValue* BinaryValue::DeepCopy() const {
-  return CreateWithCopiedBuffer(buffer_.get(), size_);
+  return CreateWithCopiedBuffer(buffer_.get(), size_).release();
 }
 
 bool BinaryValue::Equals(const Value* other) const {
@@ -350,11 +334,12 @@
 ///////////////////// DictionaryValue ////////////////////
 
 // static
-scoped_ptr<DictionaryValue> DictionaryValue::From(scoped_ptr<Value> value) {
+std::unique_ptr<DictionaryValue> DictionaryValue::From(
+    std::unique_ptr<Value> value) {
   DictionaryValue* out;
   if (value && value->GetAsDictionary(&out)) {
     ignore_result(value.release());
-    return make_scoped_ptr(out);
+    return WrapUnique(out);
   }
   return nullptr;
 }
@@ -381,22 +366,17 @@
 
 bool DictionaryValue::HasKey(const std::string& key) const {
   DCHECK(IsStringUTF8(key));
-  ValueMap::const_iterator current_entry = dictionary_.find(key);
+  auto current_entry = dictionary_.find(key);
   DCHECK((current_entry == dictionary_.end()) || current_entry->second);
   return current_entry != dictionary_.end();
 }
 
 void DictionaryValue::Clear() {
-  ValueMap::iterator dict_iterator = dictionary_.begin();
-  while (dict_iterator != dictionary_.end()) {
-    delete dict_iterator->second;
-    ++dict_iterator;
-  }
-
   dictionary_.clear();
 }
 
-void DictionaryValue::Set(const std::string& path, scoped_ptr<Value> in_value) {
+void DictionaryValue::Set(const std::string& path,
+                          std::unique_ptr<Value> in_value) {
   DCHECK(IsStringUTF8(path));
   DCHECK(in_value);
 
@@ -422,7 +402,7 @@
 }
 
 void DictionaryValue::Set(const std::string& path, Value* in_value) {
-  Set(path, make_scoped_ptr(in_value));
+  Set(path, WrapUnique(in_value));
 }
 
 void DictionaryValue::SetBoolean(const std::string& path, bool in_value) {
@@ -448,22 +428,13 @@
 }
 
 void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
-                                              scoped_ptr<Value> in_value) {
-  Value* bare_ptr = in_value.release();
-  // If there's an existing value here, we need to delete it, because
-  // we own all our children.
-  std::pair<ValueMap::iterator, bool> ins_res =
-      dictionary_.insert(std::make_pair(key, bare_ptr));
-  if (!ins_res.second) {
-    DCHECK_NE(ins_res.first->second, bare_ptr);  // This would be bogus
-    delete ins_res.first->second;
-    ins_res.first->second = bare_ptr;
-  }
+                                              std::unique_ptr<Value> in_value) {
+  dictionary_[key] = std::move(in_value);
 }
 
 void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
                                               Value* in_value) {
-  SetWithoutPathExpansion(key, make_scoped_ptr(in_value));
+  SetWithoutPathExpansion(key, WrapUnique(in_value));
 }
 
 void DictionaryValue::SetBooleanWithoutPathExpansion(
@@ -642,13 +613,12 @@
 bool DictionaryValue::GetWithoutPathExpansion(const std::string& key,
                                               const Value** out_value) const {
   DCHECK(IsStringUTF8(key));
-  ValueMap::const_iterator entry_iterator = dictionary_.find(key);
+  auto entry_iterator = dictionary_.find(key);
   if (entry_iterator == dictionary_.end())
     return false;
 
-  const Value* entry = entry_iterator->second;
   if (out_value)
-    *out_value = entry;
+    *out_value = entry_iterator->second.get();
   return true;
 }
 
@@ -752,7 +722,7 @@
 }
 
 bool DictionaryValue::Remove(const std::string& path,
-                             scoped_ptr<Value>* out_value) {
+                             std::unique_ptr<Value>* out_value) {
   DCHECK(IsStringUTF8(path));
   std::string current_path(path);
   DictionaryValue* current_dictionary = this;
@@ -768,24 +738,22 @@
                                                         out_value);
 }
 
-bool DictionaryValue::RemoveWithoutPathExpansion(const std::string& key,
-                                                 scoped_ptr<Value>* out_value) {
+bool DictionaryValue::RemoveWithoutPathExpansion(
+    const std::string& key,
+    std::unique_ptr<Value>* out_value) {
   DCHECK(IsStringUTF8(key));
-  ValueMap::iterator entry_iterator = dictionary_.find(key);
+  auto entry_iterator = dictionary_.find(key);
   if (entry_iterator == dictionary_.end())
     return false;
 
-  Value* entry = entry_iterator->second;
   if (out_value)
-    out_value->reset(entry);
-  else
-    delete entry;
+    *out_value = std::move(entry_iterator->second);
   dictionary_.erase(entry_iterator);
   return true;
 }
 
 bool DictionaryValue::RemovePath(const std::string& path,
-                                 scoped_ptr<Value>* out_value) {
+                                 std::unique_ptr<Value>* out_value) {
   bool result = false;
   size_t delimiter_position = path.find('.');
 
@@ -804,9 +772,10 @@
   return result;
 }
 
-scoped_ptr<DictionaryValue> DictionaryValue::DeepCopyWithoutEmptyChildren()
+std::unique_ptr<DictionaryValue> DictionaryValue::DeepCopyWithoutEmptyChildren()
     const {
-  scoped_ptr<DictionaryValue> copy = CopyDictionaryWithoutEmptyChildren(*this);
+  std::unique_ptr<DictionaryValue> copy =
+      CopyDictionaryWithoutEmptyChildren(*this);
   if (!copy)
     copy.reset(new DictionaryValue);
   return copy;
@@ -837,22 +806,23 @@
     : target_(target),
       it_(target.dictionary_.begin()) {}
 
+DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
+
 DictionaryValue::Iterator::~Iterator() {}
 
 DictionaryValue* DictionaryValue::DeepCopy() const {
   DictionaryValue* result = new DictionaryValue;
 
-  for (ValueMap::const_iterator current_entry(dictionary_.begin());
-       current_entry != dictionary_.end(); ++current_entry) {
-    result->SetWithoutPathExpansion(current_entry->first,
-                                    current_entry->second->DeepCopy());
+  for (const auto& current_entry : dictionary_) {
+    result->SetWithoutPathExpansion(current_entry.first,
+                                    current_entry.second->CreateDeepCopy());
   }
 
   return result;
 }
 
-scoped_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
-  return make_scoped_ptr(DeepCopy());
+std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
+  return WrapUnique(DeepCopy());
 }
 
 bool DictionaryValue::Equals(const Value* other) const {
@@ -880,11 +850,11 @@
 ///////////////////// ListValue ////////////////////
 
 // static
-scoped_ptr<ListValue> ListValue::From(scoped_ptr<Value> value) {
+std::unique_ptr<ListValue> ListValue::From(std::unique_ptr<Value> value) {
   ListValue* out;
   if (value && value->GetAsList(&out)) {
     ignore_result(value.release());
-    return make_scoped_ptr(out);
+    return WrapUnique(out);
   }
   return nullptr;
 }
@@ -897,12 +867,14 @@
 }
 
 void ListValue::Clear() {
-  for (ValueVector::iterator i(list_.begin()); i != list_.end(); ++i)
-    delete *i;
   list_.clear();
 }
 
 bool ListValue::Set(size_t index, Value* in_value) {
+  return Set(index, WrapUnique(in_value));
+}
+
+bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
   if (!in_value)
     return false;
 
@@ -910,25 +882,21 @@
     // Pad out any intermediate indexes with null settings
     while (index > list_.size())
       Append(CreateNullValue());
-    Append(in_value);
+    Append(std::move(in_value));
   } else {
+    // TODO(dcheng): remove this DCHECK once the raw pointer version is removed?
     DCHECK(list_[index] != in_value);
-    delete list_[index];
-    list_[index] = in_value;
+    list_[index] = std::move(in_value);
   }
   return true;
 }
 
-bool ListValue::Set(size_t index, scoped_ptr<Value> in_value) {
-  return Set(index, in_value.release());
-}
-
 bool ListValue::Get(size_t index, const Value** out_value) const {
   if (index >= list_.size())
     return false;
 
   if (out_value)
-    *out_value = list_[index];
+    *out_value = list_[index].get();
 
   return true;
 }
@@ -1034,25 +1002,22 @@
       const_cast<const ListValue**>(out_value));
 }
 
-bool ListValue::Remove(size_t index, scoped_ptr<Value>* out_value) {
+bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
   if (index >= list_.size())
     return false;
 
   if (out_value)
-    out_value->reset(list_[index]);
-  else
-    delete list_[index];
+    *out_value = std::move(list_[index]);
 
   list_.erase(list_.begin() + index);
   return true;
 }
 
 bool ListValue::Remove(const Value& value, size_t* index) {
-  for (ValueVector::iterator i(list_.begin()); i != list_.end(); ++i) {
-    if ((*i)->Equals(&value)) {
-      size_t previous_index = i - list_.begin();
-      delete *i;
-      list_.erase(i);
+  for (auto it = list_.begin(); it != list_.end(); ++it) {
+    if ((*it)->Equals(&value)) {
+      size_t previous_index = it - list_.begin();
+      list_.erase(it);
 
       if (index)
         *index = previous_index;
@@ -1063,22 +1028,20 @@
 }
 
 ListValue::iterator ListValue::Erase(iterator iter,
-                                     scoped_ptr<Value>* out_value) {
+                                     std::unique_ptr<Value>* out_value) {
   if (out_value)
-    out_value->reset(*iter);
-  else
-    delete *iter;
+    *out_value = std::move(*Storage::iterator(iter));
 
   return list_.erase(iter);
 }
 
-void ListValue::Append(scoped_ptr<Value> in_value) {
-  Append(in_value.release());
+void ListValue::Append(std::unique_ptr<Value> in_value) {
+  list_.push_back(std::move(in_value));
 }
 
 void ListValue::Append(Value* in_value) {
   DCHECK(in_value);
-  list_.push_back(in_value);
+  Append(WrapUnique(in_value));
 }
 
 void ListValue::AppendBoolean(bool in_value) {
@@ -1117,13 +1080,13 @@
 
 bool ListValue::AppendIfNotPresent(Value* in_value) {
   DCHECK(in_value);
-  for (ValueVector::const_iterator i(list_.begin()); i != list_.end(); ++i) {
-    if ((*i)->Equals(in_value)) {
+  for (const auto& entry : list_) {
+    if (entry->Equals(in_value)) {
       delete in_value;
       return false;
     }
   }
-  list_.push_back(in_value);
+  list_.emplace_back(in_value);
   return true;
 }
 
@@ -1132,12 +1095,15 @@
   if (index > list_.size())
     return false;
 
-  list_.insert(list_.begin() + index, in_value);
+  list_.insert(list_.begin() + index, WrapUnique(in_value));
   return true;
 }
 
 ListValue::const_iterator ListValue::Find(const Value& value) const {
-  return std::find_if(list_.begin(), list_.end(), ValueEquals(&value));
+  return std::find_if(list_.begin(), list_.end(),
+                      [&value](const std::unique_ptr<Value>& entry) {
+                        return entry->Equals(&value);
+                      });
 }
 
 void ListValue::Swap(ListValue* other) {
@@ -1159,14 +1125,14 @@
 ListValue* ListValue::DeepCopy() const {
   ListValue* result = new ListValue;
 
-  for (ValueVector::const_iterator i(list_.begin()); i != list_.end(); ++i)
-    result->Append((*i)->DeepCopy());
+  for (const auto& entry : list_)
+    result->Append(entry->CreateDeepCopy());
 
   return result;
 }
 
-scoped_ptr<ListValue> ListValue::CreateDeepCopy() const {
-  return make_scoped_ptr(DeepCopy());
+std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
+  return WrapUnique(DeepCopy());
 }
 
 bool ListValue::Equals(const Value* other) const {
@@ -1175,11 +1141,11 @@
 
   const ListValue* other_list =
       static_cast<const ListValue*>(other);
-  const_iterator lhs_it, rhs_it;
+  Storage::const_iterator lhs_it, rhs_it;
   for (lhs_it = begin(), rhs_it = other_list->begin();
        lhs_it != end() && rhs_it != other_list->end();
        ++lhs_it, ++rhs_it) {
-    if (!(*lhs_it)->Equals(*rhs_it))
+    if (!(*lhs_it)->Equals(rhs_it->get()))
       return false;
   }
   if (lhs_it != end() || rhs_it != other_list->end())
diff --git a/base/values.h b/base/values.h
index 07e5b6c..e3d6089 100644
--- a/base/values.h
+++ b/base/values.h
@@ -22,6 +22,7 @@
 
 #include <iosfwd>
 #include <map>
+#include <memory>
 #include <string>
 #include <utility>
 #include <vector>
@@ -29,7 +30,6 @@
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string16.h"
 #include "base/strings/string_piece.h"
 
@@ -42,9 +42,6 @@
 class StringValue;
 class Value;
 
-typedef std::vector<Value*> ValueVector;
-typedef std::map<std::string, Value*> ValueMap;
-
 // The Value class is the base class for Values. A Value can be instantiated
 // via the Create*Value() factory methods, or by directly creating instances of
 // the subclasses.
@@ -66,7 +63,7 @@
 
   virtual ~Value();
 
-  static scoped_ptr<Value> CreateNullValue();
+  static std::unique_ptr<Value> CreateNullValue();
 
   // Returns the type of the value stored by the current Value object.
   // Each type will be implemented by only one subclass of Value, so it's
@@ -102,7 +99,7 @@
   // this works because C++ supports covariant return types.
   virtual Value* DeepCopy() const;
   // Preferred version of DeepCopy. TODO(estade): remove the above.
-  scoped_ptr<Value> CreateDeepCopy() const;
+  std::unique_ptr<Value> CreateDeepCopy() const;
 
   // Compares if two Value objects have equal contents.
   virtual bool Equals(const Value* other) const;
@@ -178,14 +175,15 @@
 
   // Creates a BinaryValue, taking ownership of the bytes pointed to by
   // |buffer|.
-  BinaryValue(scoped_ptr<char[]> buffer, size_t size);
+  BinaryValue(std::unique_ptr<char[]> buffer, size_t size);
 
   ~BinaryValue() override;
 
   // For situations where you want to keep ownership of your buffer, this
   // factory method creates a new BinaryValue by copying the contents of the
   // buffer that's passed in.
-  static BinaryValue* CreateWithCopiedBuffer(const char* buffer, size_t size);
+  static std::unique_ptr<BinaryValue> CreateWithCopiedBuffer(const char* buffer,
+                                                             size_t size);
 
   size_t GetSize() const { return size_; }
 
@@ -199,7 +197,7 @@
   bool Equals(const Value* other) const override;
 
  private:
-  scoped_ptr<char[]> buffer_;
+  std::unique_ptr<char[]> buffer_;
   size_t size_;
 
   DISALLOW_COPY_AND_ASSIGN(BinaryValue);
@@ -210,8 +208,9 @@
 // are |std::string|s and should be UTF-8 encoded.
 class BASE_EXPORT DictionaryValue : public Value {
  public:
+  using Storage = std::map<std::string, std::unique_ptr<Value>>;
   // Returns |value| if it is a dictionary, nullptr otherwise.
-  static scoped_ptr<DictionaryValue> From(scoped_ptr<Value> value);
+  static std::unique_ptr<DictionaryValue> From(std::unique_ptr<Value> value);
 
   DictionaryValue();
   ~DictionaryValue() override;
@@ -239,7 +238,7 @@
   // If the key at any step of the way doesn't exist, or exists but isn't
   // a DictionaryValue, a new DictionaryValue will be created and attached
   // to the path in that location. |in_value| must be non-null.
-  void Set(const std::string& path, scoped_ptr<Value> in_value);
+  void Set(const std::string& path, std::unique_ptr<Value> in_value);
   // Deprecated version of the above. TODO(estade): remove.
   void Set(const std::string& path, Value* in_value);
 
@@ -254,7 +253,7 @@
   // Like Set(), but without special treatment of '.'.  This allows e.g. URLs to
   // be used as paths.
   void SetWithoutPathExpansion(const std::string& key,
-                               scoped_ptr<Value> in_value);
+                               std::unique_ptr<Value> in_value);
   // Deprecated version of the above. TODO(estade): remove.
   void SetWithoutPathExpansion(const std::string& key, Value* in_value);
 
@@ -329,21 +328,22 @@
   // |out_value|.  If |out_value| is NULL, the removed value will be deleted.
   // This method returns true if |path| is a valid path; otherwise it will
   // return false and the DictionaryValue object will be unchanged.
-  virtual bool Remove(const std::string& path, scoped_ptr<Value>* out_value);
+  virtual bool Remove(const std::string& path,
+                      std::unique_ptr<Value>* out_value);
 
   // Like Remove(), but without special treatment of '.'.  This allows e.g. URLs
   // to be used as paths.
   virtual bool RemoveWithoutPathExpansion(const std::string& key,
-                                          scoped_ptr<Value>* out_value);
+                                          std::unique_ptr<Value>* out_value);
 
   // Removes a path, clearing out all dictionaries on |path| that remain empty
   // after removing the value at |path|.
   virtual bool RemovePath(const std::string& path,
-                          scoped_ptr<Value>* out_value);
+                          std::unique_ptr<Value>* out_value);
 
   // Makes a copy of |this| but doesn't include empty dictionaries and lists in
   // the copy.  This never returns NULL, even if |this| itself is empty.
-  scoped_ptr<DictionaryValue> DeepCopyWithoutEmptyChildren() const;
+  std::unique_ptr<DictionaryValue> DeepCopyWithoutEmptyChildren() const;
 
   // Merge |dictionary| into this dictionary. This is done recursively, i.e. any
   // sub-dictionaries will be merged as well. In case of key collisions, the
@@ -360,6 +360,7 @@
   class BASE_EXPORT Iterator {
    public:
     explicit Iterator(const DictionaryValue& target);
+    Iterator(const Iterator& other);
     ~Iterator();
 
     bool IsAtEnd() const { return it_ == target_.dictionary_.end(); }
@@ -370,17 +371,17 @@
 
    private:
     const DictionaryValue& target_;
-    ValueMap::const_iterator it_;
+    Storage::const_iterator it_;
   };
 
   // Overridden from Value:
   DictionaryValue* DeepCopy() const override;
   // Preferred version of DeepCopy. TODO(estade): remove the above.
-  scoped_ptr<DictionaryValue> CreateDeepCopy() const;
+  std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
   bool Equals(const Value* other) const override;
 
  private:
-  ValueMap dictionary_;
+  Storage dictionary_;
 
   DISALLOW_COPY_AND_ASSIGN(DictionaryValue);
 };
@@ -388,11 +389,12 @@
 // This type of Value represents a list of other Value values.
 class BASE_EXPORT ListValue : public Value {
  public:
-  typedef ValueVector::iterator iterator;
-  typedef ValueVector::const_iterator const_iterator;
+  using Storage = std::vector<std::unique_ptr<Value>>;
+  using const_iterator = Storage::const_iterator;
+  using iterator = Storage::iterator;
 
   // Returns |value| if it is a list, nullptr otherwise.
-  static scoped_ptr<ListValue> From(scoped_ptr<Value> value);
+  static std::unique_ptr<ListValue> From(std::unique_ptr<Value> value);
 
   ListValue();
   ~ListValue() override;
@@ -413,7 +415,7 @@
   // the value is a null pointer.
   bool Set(size_t index, Value* in_value);
   // Preferred version of the above. TODO(estade): remove the above.
-  bool Set(size_t index, scoped_ptr<Value> in_value);
+  bool Set(size_t index, std::unique_ptr<Value> in_value);
 
   // Gets the Value at the given index.  Modifies |out_value| (and returns true)
   // only if the index falls within the current list range.
@@ -445,7 +447,7 @@
   // passed out via |out_value|.  If |out_value| is NULL, the removed value will
   // be deleted.  This method returns true if |index| is valid; otherwise
   // it will return false and the ListValue object will be unchanged.
-  virtual bool Remove(size_t index, scoped_ptr<Value>* out_value);
+  virtual bool Remove(size_t index, std::unique_ptr<Value>* out_value);
 
   // Removes the first instance of |value| found in the list, if any, and
   // deletes it. |index| is the location where |value| was found. Returns false
@@ -456,10 +458,10 @@
   // deleted, otherwise ownership of the value is passed back to the caller.
   // Returns an iterator pointing to the location of the element that
   // followed the erased element.
-  iterator Erase(iterator iter, scoped_ptr<Value>* out_value);
+  iterator Erase(iterator iter, std::unique_ptr<Value>* out_value);
 
   // Appends a Value to the end of the list.
-  void Append(scoped_ptr<Value> in_value);
+  void Append(std::unique_ptr<Value> in_value);
   // Deprecated version of the above. TODO(estade): remove.
   void Append(Value* in_value);
 
@@ -503,10 +505,10 @@
   bool Equals(const Value* other) const override;
 
   // Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
-  scoped_ptr<ListValue> CreateDeepCopy() const;
+  std::unique_ptr<ListValue> CreateDeepCopy() const;
 
  private:
-  ValueVector list_;
+  Storage list_;
 
   DISALLOW_COPY_AND_ASSIGN(ListValue);
 };
@@ -532,8 +534,8 @@
   // error_code will be set with the underlying error.
   // If |error_message| is non-null, it will be filled in with a formatted
   // error message including the location of the error if appropriate.
-  virtual scoped_ptr<Value> Deserialize(int* error_code,
-                                        std::string* error_str) = 0;
+  virtual std::unique_ptr<Value> Deserialize(int* error_code,
+                                             std::string* error_str) = 0;
 };
 
 // Stream operator so Values can be used in assertion statements.  In order that
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 66453e0..d685222 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -2,15 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/values.h"
+
 #include <stddef.h>
 
 #include <limits>
+#include <memory>
 #include <utility>
 
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/strings/string16.h"
 #include "base/strings/utf_string_conversions.h"
-#include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -36,11 +38,11 @@
   ASSERT_FALSE(
     settings.GetList("global.toolbar.bookmarks", &toolbar_bookmarks));
 
-  scoped_ptr<ListValue> new_toolbar_bookmarks(new ListValue);
+  std::unique_ptr<ListValue> new_toolbar_bookmarks(new ListValue);
   settings.Set("global.toolbar.bookmarks", std::move(new_toolbar_bookmarks));
   ASSERT_TRUE(settings.GetList("global.toolbar.bookmarks", &toolbar_bookmarks));
 
-  scoped_ptr<DictionaryValue> new_bookmark(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> new_bookmark(new DictionaryValue);
   new_bookmark->SetString("name", "Froogle");
   new_bookmark->SetString("url", "http://froogle.com");
   toolbar_bookmarks->Append(std::move(new_bookmark));
@@ -59,11 +61,11 @@
 }
 
 TEST(ValuesTest, List) {
-  scoped_ptr<ListValue> mixed_list(new ListValue());
-  mixed_list->Set(0, make_scoped_ptr(new FundamentalValue(true)));
-  mixed_list->Set(1, make_scoped_ptr(new FundamentalValue(42)));
-  mixed_list->Set(2, make_scoped_ptr(new FundamentalValue(88.8)));
-  mixed_list->Set(3, make_scoped_ptr(new StringValue("foo")));
+  std::unique_ptr<ListValue> mixed_list(new ListValue());
+  mixed_list->Set(0, WrapUnique(new FundamentalValue(true)));
+  mixed_list->Set(1, WrapUnique(new FundamentalValue(42)));
+  mixed_list->Set(2, WrapUnique(new FundamentalValue(88.8)));
+  mixed_list->Set(3, WrapUnique(new StringValue("foo")));
   ASSERT_EQ(4u, mixed_list->GetSize());
 
   Value *value = NULL;
@@ -109,13 +111,13 @@
 
 TEST(ValuesTest, BinaryValue) {
   // Default constructor creates a BinaryValue with a null buffer and size 0.
-  scoped_ptr<BinaryValue> binary(new BinaryValue());
+  std::unique_ptr<BinaryValue> binary(new BinaryValue());
   ASSERT_TRUE(binary.get());
   ASSERT_EQ(NULL, binary->GetBuffer());
   ASSERT_EQ(0U, binary->GetSize());
 
   // Test the common case of a non-empty buffer
-  scoped_ptr<char[]> buffer(new char[15]);
+  std::unique_ptr<char[]> buffer(new char[15]);
   char* original_buffer = buffer.get();
   binary.reset(new BinaryValue(std::move(buffer), 15));
   ASSERT_TRUE(binary.get());
@@ -125,7 +127,7 @@
 
   char stack_buffer[42];
   memset(stack_buffer, '!', 42);
-  binary.reset(BinaryValue::CreateWithCopiedBuffer(stack_buffer, 42));
+  binary = BinaryValue::CreateWithCopiedBuffer(stack_buffer, 42);
   ASSERT_TRUE(binary.get());
   ASSERT_TRUE(binary->GetBuffer());
   ASSERT_NE(stack_buffer, binary->GetBuffer());
@@ -141,10 +143,10 @@
 
 TEST(ValuesTest, StringValue) {
   // Test overloaded StringValue constructor.
-  scoped_ptr<Value> narrow_value(new StringValue("narrow"));
+  std::unique_ptr<Value> narrow_value(new StringValue("narrow"));
   ASSERT_TRUE(narrow_value.get());
   ASSERT_TRUE(narrow_value->IsType(Value::TYPE_STRING));
-  scoped_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
+  std::unique_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
   ASSERT_TRUE(utf16_value.get());
   ASSERT_TRUE(utf16_value->IsType(Value::TYPE_STRING));
 
@@ -198,14 +200,14 @@
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
   }
   EXPECT_TRUE(deletion_flag);
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     list.Clear();
     EXPECT_TRUE(deletion_flag);
@@ -213,7 +215,7 @@
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(list.Set(0, Value::CreateNullValue()));
     EXPECT_TRUE(deletion_flag);
@@ -222,11 +224,11 @@
 
 TEST(ValuesTest, ListRemoval) {
   bool deletion_flag = true;
-  scoped_ptr<Value> removed_item;
+  std::unique_ptr<Value> removed_item;
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_EQ(1U, list.GetSize());
     EXPECT_FALSE(list.Remove(std::numeric_limits<size_t>::max(),
@@ -242,7 +244,7 @@
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(list.Remove(0, NULL));
     EXPECT_TRUE(deletion_flag);
@@ -251,7 +253,8 @@
 
   {
     ListValue list;
-    scoped_ptr<DeletionTestValue> value(new DeletionTestValue(&deletion_flag));
+    std::unique_ptr<DeletionTestValue> value(
+        new DeletionTestValue(&deletion_flag));
     DeletionTestValue* original_value = value.get();
     list.Append(std::move(value));
     EXPECT_FALSE(deletion_flag);
@@ -269,14 +272,14 @@
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
   }
   EXPECT_TRUE(deletion_flag);
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     dict.Clear();
     EXPECT_TRUE(deletion_flag);
@@ -284,7 +287,7 @@
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     dict.Set(key, Value::CreateNullValue());
     EXPECT_TRUE(deletion_flag);
@@ -294,11 +297,11 @@
 TEST(ValuesTest, DictionaryRemoval) {
   std::string key = "test";
   bool deletion_flag = true;
-  scoped_ptr<Value> removed_item;
+  std::unique_ptr<Value> removed_item;
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(dict.HasKey(key));
     EXPECT_FALSE(dict.Remove("absent key", &removed_item));
@@ -312,7 +315,7 @@
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(dict.HasKey(key));
     EXPECT_TRUE(dict.Remove(key, NULL));
@@ -372,7 +375,7 @@
   dict.SetInteger("a.long.way.down", 1);
   dict.SetBoolean("a.long.key.path", true);
 
-  scoped_ptr<Value> removed_item;
+  std::unique_ptr<Value> removed_item;
   EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
   ASSERT_TRUE(removed_item);
   EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_INTEGER));
@@ -394,49 +397,52 @@
 
 TEST(ValuesTest, DeepCopy) {
   DictionaryValue original_dict;
-  scoped_ptr<Value> scoped_null = Value::CreateNullValue();
+  std::unique_ptr<Value> scoped_null = Value::CreateNullValue();
   Value* original_null = scoped_null.get();
   original_dict.Set("null", std::move(scoped_null));
-  scoped_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
+  std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
   FundamentalValue* original_bool = scoped_bool.get();
   original_dict.Set("bool", std::move(scoped_bool));
-  scoped_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
+  std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
   FundamentalValue* original_int = scoped_int.get();
   original_dict.Set("int", std::move(scoped_int));
-  scoped_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
+  std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
   FundamentalValue* original_double = scoped_double.get();
   original_dict.Set("double", std::move(scoped_double));
-  scoped_ptr<StringValue> scoped_string(new StringValue("hello"));
+  std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
   StringValue* original_string = scoped_string.get();
   original_dict.Set("string", std::move(scoped_string));
-  scoped_ptr<StringValue> scoped_string16(
+  std::unique_ptr<StringValue> scoped_string16(
       new StringValue(ASCIIToUTF16("hello16")));
   StringValue* original_string16 = scoped_string16.get();
   original_dict.Set("string16", std::move(scoped_string16));
 
-  scoped_ptr<char[]> original_buffer(new char[42]);
+  std::unique_ptr<char[]> original_buffer(new char[42]);
   memset(original_buffer.get(), '!', 42);
-  scoped_ptr<BinaryValue> scoped_binary(
+  std::unique_ptr<BinaryValue> scoped_binary(
       new BinaryValue(std::move(original_buffer), 42));
   BinaryValue* original_binary = scoped_binary.get();
   original_dict.Set("binary", std::move(scoped_binary));
 
-  scoped_ptr<ListValue> scoped_list(new ListValue());
+  std::unique_ptr<ListValue> scoped_list(new ListValue());
   Value* original_list = scoped_list.get();
-  scoped_ptr<FundamentalValue> scoped_list_element_0(new FundamentalValue(0));
+  std::unique_ptr<FundamentalValue> scoped_list_element_0(
+      new FundamentalValue(0));
   Value* original_list_element_0 = scoped_list_element_0.get();
   scoped_list->Append(std::move(scoped_list_element_0));
-  scoped_ptr<FundamentalValue> scoped_list_element_1(new FundamentalValue(1));
+  std::unique_ptr<FundamentalValue> scoped_list_element_1(
+      new FundamentalValue(1));
   Value* original_list_element_1 = scoped_list_element_1.get();
   scoped_list->Append(std::move(scoped_list_element_1));
   original_dict.Set("list", std::move(scoped_list));
 
-  scoped_ptr<DictionaryValue> scoped_nested_dictionary(new DictionaryValue());
+  std::unique_ptr<DictionaryValue> scoped_nested_dictionary(
+      new DictionaryValue());
   Value* original_nested_dictionary = scoped_nested_dictionary.get();
   scoped_nested_dictionary->SetString("key", "value");
   original_dict.Set("dictionary", std::move(scoped_nested_dictionary));
 
-  scoped_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
+  std::unique_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
   ASSERT_TRUE(copy_dict.get());
   ASSERT_NE(copy_dict.get(), &original_dict);
 
@@ -546,8 +552,8 @@
 }
 
 TEST(ValuesTest, Equals) {
-  scoped_ptr<Value> null1(Value::CreateNullValue());
-  scoped_ptr<Value> null2(Value::CreateNullValue());
+  std::unique_ptr<Value> null1(Value::CreateNullValue());
+  std::unique_ptr<Value> null2(Value::CreateNullValue());
   EXPECT_NE(null1.get(), null2.get());
   EXPECT_TRUE(null1->Equals(null2.get()));
 
@@ -562,21 +568,21 @@
   dv.SetString("d2", ASCIIToUTF16("http://google.com"));
   dv.Set("e", Value::CreateNullValue());
 
-  scoped_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
+  std::unique_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
   EXPECT_TRUE(dv.Equals(copy.get()));
 
-  scoped_ptr<ListValue> list(new ListValue);
+  std::unique_ptr<ListValue> list(new ListValue);
   ListValue* original_list = list.get();
   list->Append(Value::CreateNullValue());
-  list->Append(make_scoped_ptr(new DictionaryValue));
-  scoped_ptr<Value> list_copy(list->CreateDeepCopy());
+  list->Append(WrapUnique(new DictionaryValue));
+  std::unique_ptr<Value> list_copy(list->CreateDeepCopy());
 
   dv.Set("f", std::move(list));
   EXPECT_FALSE(dv.Equals(copy.get()));
   copy->Set("f", std::move(list_copy));
   EXPECT_TRUE(dv.Equals(copy.get()));
 
-  original_list->Append(make_scoped_ptr(new FundamentalValue(true)));
+  original_list->Append(WrapUnique(new FundamentalValue(true)));
   EXPECT_FALSE(dv.Equals(copy.get()));
 
   // Check if Equals detects differences in only the keys.
@@ -588,14 +594,14 @@
 }
 
 TEST(ValuesTest, StaticEquals) {
-  scoped_ptr<Value> null1(Value::CreateNullValue());
-  scoped_ptr<Value> null2(Value::CreateNullValue());
+  std::unique_ptr<Value> null1(Value::CreateNullValue());
+  std::unique_ptr<Value> null2(Value::CreateNullValue());
   EXPECT_TRUE(Value::Equals(null1.get(), null2.get()));
   EXPECT_TRUE(Value::Equals(NULL, NULL));
 
-  scoped_ptr<Value> i42(new FundamentalValue(42));
-  scoped_ptr<Value> j42(new FundamentalValue(42));
-  scoped_ptr<Value> i17(new FundamentalValue(17));
+  std::unique_ptr<Value> i42(new FundamentalValue(42));
+  std::unique_ptr<Value> j42(new FundamentalValue(42));
+  std::unique_ptr<Value> i17(new FundamentalValue(17));
   EXPECT_TRUE(Value::Equals(i42.get(), i42.get()));
   EXPECT_TRUE(Value::Equals(j42.get(), i42.get()));
   EXPECT_TRUE(Value::Equals(i42.get(), j42.get()));
@@ -612,50 +618,52 @@
 
 TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
   DictionaryValue original_dict;
-  scoped_ptr<Value> scoped_null(Value::CreateNullValue());
+  std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
   Value* original_null = scoped_null.get();
   original_dict.Set("null", std::move(scoped_null));
-  scoped_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
+  std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
   Value* original_bool = scoped_bool.get();
   original_dict.Set("bool", std::move(scoped_bool));
-  scoped_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
+  std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
   Value* original_int = scoped_int.get();
   original_dict.Set("int", std::move(scoped_int));
-  scoped_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
+  std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
   Value* original_double = scoped_double.get();
   original_dict.Set("double", std::move(scoped_double));
-  scoped_ptr<StringValue> scoped_string(new StringValue("hello"));
+  std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
   Value* original_string = scoped_string.get();
   original_dict.Set("string", std::move(scoped_string));
-  scoped_ptr<StringValue> scoped_string16(
+  std::unique_ptr<StringValue> scoped_string16(
       new StringValue(ASCIIToUTF16("hello16")));
   Value* original_string16 = scoped_string16.get();
   original_dict.Set("string16", std::move(scoped_string16));
 
-  scoped_ptr<char[]> original_buffer(new char[42]);
+  std::unique_ptr<char[]> original_buffer(new char[42]);
   memset(original_buffer.get(), '!', 42);
-  scoped_ptr<BinaryValue> scoped_binary(
+  std::unique_ptr<BinaryValue> scoped_binary(
       new BinaryValue(std::move(original_buffer), 42));
   Value* original_binary = scoped_binary.get();
   original_dict.Set("binary", std::move(scoped_binary));
 
-  scoped_ptr<ListValue> scoped_list(new ListValue());
+  std::unique_ptr<ListValue> scoped_list(new ListValue());
   Value* original_list = scoped_list.get();
-  scoped_ptr<FundamentalValue> scoped_list_element_0(new FundamentalValue(0));
+  std::unique_ptr<FundamentalValue> scoped_list_element_0(
+      new FundamentalValue(0));
   scoped_list->Append(std::move(scoped_list_element_0));
-  scoped_ptr<FundamentalValue> scoped_list_element_1(new FundamentalValue(1));
+  std::unique_ptr<FundamentalValue> scoped_list_element_1(
+      new FundamentalValue(1));
   scoped_list->Append(std::move(scoped_list_element_1));
   original_dict.Set("list", std::move(scoped_list));
 
-  scoped_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
-  scoped_ptr<Value> copy_null = original_null->CreateDeepCopy();
-  scoped_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
-  scoped_ptr<Value> copy_int = original_int->CreateDeepCopy();
-  scoped_ptr<Value> copy_double = original_double->CreateDeepCopy();
-  scoped_ptr<Value> copy_string = original_string->CreateDeepCopy();
-  scoped_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
-  scoped_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
-  scoped_ptr<Value> copy_list = original_list->CreateDeepCopy();
+  std::unique_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
+  std::unique_ptr<Value> copy_null = original_null->CreateDeepCopy();
+  std::unique_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
+  std::unique_ptr<Value> copy_int = original_int->CreateDeepCopy();
+  std::unique_ptr<Value> copy_double = original_double->CreateDeepCopy();
+  std::unique_ptr<Value> copy_string = original_string->CreateDeepCopy();
+  std::unique_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
+  std::unique_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
+  std::unique_ptr<Value> copy_list = original_list->CreateDeepCopy();
 
   EXPECT_TRUE(original_dict.Equals(copy_dict.get()));
   EXPECT_TRUE(original_null->Equals(copy_null.get()));
@@ -669,18 +677,18 @@
 }
 
 TEST(ValuesTest, RemoveEmptyChildren) {
-  scoped_ptr<DictionaryValue> root(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> root(new DictionaryValue);
   // Remove empty lists and dictionaries.
-  root->Set("empty_dict", make_scoped_ptr(new DictionaryValue));
-  root->Set("empty_list", make_scoped_ptr(new ListValue));
+  root->Set("empty_dict", WrapUnique(new DictionaryValue));
+  root->Set("empty_list", WrapUnique(new ListValue));
   root->SetWithoutPathExpansion("a.b.c.d.e",
-                                make_scoped_ptr(new DictionaryValue));
+                                WrapUnique(new DictionaryValue));
   root = root->DeepCopyWithoutEmptyChildren();
   EXPECT_TRUE(root->empty());
 
   // Make sure we don't prune too much.
   root->SetBoolean("bool", true);
-  root->Set("empty_dict", make_scoped_ptr(new DictionaryValue));
+  root->Set("empty_dict", WrapUnique(new DictionaryValue));
   root->SetString("empty_string", std::string());
   root = root->DeepCopyWithoutEmptyChildren();
   EXPECT_EQ(2U, root->size());
@@ -692,22 +700,22 @@
   // Nested test cases.  These should all reduce back to the bool and string
   // set above.
   {
-    root->Set("a.b.c.d.e", make_scoped_ptr(new DictionaryValue));
+    root->Set("a.b.c.d.e", WrapUnique(new DictionaryValue));
     root = root->DeepCopyWithoutEmptyChildren();
     EXPECT_EQ(2U, root->size());
   }
   {
-    scoped_ptr<DictionaryValue> inner(new DictionaryValue);
-    inner->Set("empty_dict", make_scoped_ptr(new DictionaryValue));
-    inner->Set("empty_list", make_scoped_ptr(new ListValue));
+    std::unique_ptr<DictionaryValue> inner(new DictionaryValue);
+    inner->Set("empty_dict", WrapUnique(new DictionaryValue));
+    inner->Set("empty_list", WrapUnique(new ListValue));
     root->Set("dict_with_empty_children", std::move(inner));
     root = root->DeepCopyWithoutEmptyChildren();
     EXPECT_EQ(2U, root->size());
   }
   {
-    scoped_ptr<ListValue> inner(new ListValue);
-    inner->Append(make_scoped_ptr(new DictionaryValue));
-    inner->Append(make_scoped_ptr(new ListValue));
+    std::unique_ptr<ListValue> inner(new ListValue);
+    inner->Append(WrapUnique(new DictionaryValue));
+    inner->Append(WrapUnique(new ListValue));
     root->Set("list_with_empty_children", std::move(inner));
     root = root->DeepCopyWithoutEmptyChildren();
     EXPECT_EQ(2U, root->size());
@@ -715,13 +723,13 @@
 
   // Nested with siblings.
   {
-    scoped_ptr<ListValue> inner(new ListValue());
-    inner->Append(make_scoped_ptr(new DictionaryValue));
-    inner->Append(make_scoped_ptr(new ListValue));
+    std::unique_ptr<ListValue> inner(new ListValue());
+    inner->Append(WrapUnique(new DictionaryValue));
+    inner->Append(WrapUnique(new ListValue));
     root->Set("list_with_empty_children", std::move(inner));
-    scoped_ptr<DictionaryValue> inner2(new DictionaryValue);
-    inner2->Set("empty_dict", make_scoped_ptr(new DictionaryValue));
-    inner2->Set("empty_list", make_scoped_ptr(new ListValue));
+    std::unique_ptr<DictionaryValue> inner2(new DictionaryValue);
+    inner2->Set("empty_dict", WrapUnique(new DictionaryValue));
+    inner2->Set("empty_list", WrapUnique(new ListValue));
     root->Set("dict_with_empty_children", std::move(inner2));
     root = root->DeepCopyWithoutEmptyChildren();
     EXPECT_EQ(2U, root->size());
@@ -729,10 +737,10 @@
 
   // Make sure nested values don't get pruned.
   {
-    scoped_ptr<ListValue> inner(new ListValue);
-    scoped_ptr<ListValue> inner2(new ListValue);
-    inner2->Append(make_scoped_ptr(new StringValue("hello")));
-    inner->Append(make_scoped_ptr(new DictionaryValue));
+    std::unique_ptr<ListValue> inner(new ListValue);
+    std::unique_ptr<ListValue> inner2(new ListValue);
+    inner2->Append(WrapUnique(new StringValue("hello")));
+    inner->Append(WrapUnique(new DictionaryValue));
     inner->Append(std::move(inner2));
     root->Set("list_with_empty_children", std::move(inner));
     root = root->DeepCopyWithoutEmptyChildren();
@@ -747,18 +755,18 @@
 }
 
 TEST(ValuesTest, MergeDictionary) {
-  scoped_ptr<DictionaryValue> base(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> base(new DictionaryValue);
   base->SetString("base_key", "base_key_value_base");
   base->SetString("collide_key", "collide_key_value_base");
-  scoped_ptr<DictionaryValue> base_sub_dict(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> base_sub_dict(new DictionaryValue);
   base_sub_dict->SetString("sub_base_key", "sub_base_key_value_base");
   base_sub_dict->SetString("sub_collide_key", "sub_collide_key_value_base");
   base->Set("sub_dict_key", std::move(base_sub_dict));
 
-  scoped_ptr<DictionaryValue> merge(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> merge(new DictionaryValue);
   merge->SetString("merge_key", "merge_key_value_merge");
   merge->SetString("collide_key", "collide_key_value_merge");
-  scoped_ptr<DictionaryValue> merge_sub_dict(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> merge_sub_dict(new DictionaryValue);
   merge_sub_dict->SetString("sub_merge_key", "sub_merge_key_value_merge");
   merge_sub_dict->SetString("sub_collide_key", "sub_collide_key_value_merge");
   merge->Set("sub_dict_key", std::move(merge_sub_dict));
@@ -792,7 +800,7 @@
 }
 
 TEST(ValuesTest, MergeDictionaryDeepCopy) {
-  scoped_ptr<DictionaryValue> child(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> child(new DictionaryValue);
   DictionaryValue* original_child = child.get();
   child->SetString("test", "value");
   EXPECT_EQ(1U, child->size());
@@ -801,7 +809,7 @@
   EXPECT_TRUE(child->GetString("test", &value));
   EXPECT_EQ("value", value);
 
-  scoped_ptr<DictionaryValue> base(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> base(new DictionaryValue);
   base->Set("dict", std::move(child));
   EXPECT_EQ(1U, base->size());
 
@@ -809,7 +817,7 @@
   EXPECT_TRUE(base->GetDictionary("dict", &ptr));
   EXPECT_EQ(original_child, ptr);
 
-  scoped_ptr<DictionaryValue> merged(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> merged(new DictionaryValue);
   merged->MergeDictionary(base.get());
   EXPECT_EQ(1U, merged->size());
   EXPECT_TRUE(merged->GetDictionary("dict", &ptr));
diff --git a/base/version.cc b/base/version.cc
new file mode 100644
index 0000000..02213fb
--- /dev/null
+++ b/base/version.cc
@@ -0,0 +1,193 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Parses |version_str|, splitting it on "." into the numbers that make up
+// the version string, and constructs a vector of valid integers. It fails
+// when it reaches an invalid item (including the wildcard character). |parsed|
+// is the resulting integer vector. Function returns true if all numbers were
+// parsed successfully, false otherwise.
+bool ParseVersionNumbers(const std::string& version_str,
+                         std::vector<uint32_t>* parsed) {
+  std::vector<StringPiece> numbers =
+      SplitStringPiece(version_str, ".", KEEP_WHITESPACE, SPLIT_WANT_ALL);
+  if (numbers.empty())
+    return false;
+
+  for (auto it = numbers.begin(); it != numbers.end(); ++it) {
+    if (StartsWith(*it, "+", CompareCase::SENSITIVE))
+      return false;
+
+    unsigned int num;
+    if (!StringToUint(*it, &num))
+      return false;
+
+    // This throws out leading zeros for the first item only.
+    if (it == numbers.begin() && UintToString(num) != *it)
+      return false;
+
+    // StringToUint returns unsigned int but Version fields are uint32_t.
+    static_assert(sizeof (uint32_t) == sizeof (unsigned int),
+        "uint32_t must be same as unsigned int");
+    parsed->push_back(num);
+  }
+  return true;
+}
+
+// Compares version components in |components1| with components in
+// |components2|. Returns -1, 0 or 1 if |components1| is less than, equal to,
+// or greater than |components2|, respectively.
+int CompareVersionComponents(const std::vector<uint32_t>& components1,
+                             const std::vector<uint32_t>& components2) {
+  const size_t count = std::min(components1.size(), components2.size());
+  for (size_t i = 0; i < count; ++i) {
+    if (components1[i] > components2[i])
+      return 1;
+    if (components1[i] < components2[i])
+      return -1;
+  }
+  if (components1.size() > components2.size()) {
+    for (size_t i = count; i < components1.size(); ++i) {
+      if (components1[i] > 0)
+        return 1;
+    }
+  } else if (components1.size() < components2.size()) {
+    for (size_t i = count; i < components2.size(); ++i) {
+      if (components2[i] > 0)
+        return -1;
+    }
+  }
+  return 0;
+}
+
+}  // namespace
+
+Version::Version() {
+}
+
+Version::Version(const Version& other) = default;
+
+Version::~Version() {
+}
+
+Version::Version(const std::string& version_str) {
+  std::vector<uint32_t> parsed;
+  if (!ParseVersionNumbers(version_str, &parsed))
+    return;
+
+  components_.swap(parsed);
+}
+
+bool Version::IsValid() const {
+  return (!components_.empty());
+}
+
+// static
+bool Version::IsValidWildcardString(const std::string& wildcard_string) {
+  std::string version_string = wildcard_string;
+  if (EndsWith(version_string, ".*", CompareCase::SENSITIVE))
+    version_string.resize(version_string.size() - 2);
+
+  Version version(version_string);
+  return version.IsValid();
+}
+
+int Version::CompareToWildcardString(const std::string& wildcard_string) const {
+  DCHECK(IsValid());
+  DCHECK(Version::IsValidWildcardString(wildcard_string));
+
+  // Default behavior if the string doesn't end with a wildcard.
+  if (!EndsWith(wildcard_string, ".*", CompareCase::SENSITIVE)) {
+    Version version(wildcard_string);
+    DCHECK(version.IsValid());
+    return CompareTo(version);
+  }
+
+  std::vector<uint32_t> parsed;
+  const bool success = ParseVersionNumbers(
+      wildcard_string.substr(0, wildcard_string.length() - 2), &parsed);
+  DCHECK(success);
+  const int comparison = CompareVersionComponents(components_, parsed);
+  // If the version is smaller than the wildcard version's |parsed| vector,
+  // then the wildcard has no effect (e.g. comparing 1.2.3 and 1.3.*) and the
+  // version is still smaller. Same logic for equality (e.g. comparing 1.2.2 to
+  // 1.2.2.* is 0 regardless of the wildcard). Under this logic,
+  // 1.2.0.0.0.0 compared to 1.2.* is 0.
+  if (comparison == -1 || comparison == 0)
+    return comparison;
+
+  // Catch the case where the digits of |parsed| are found in |components_|,
+  // which means that the two are equal since |parsed| has a trailing "*".
+  // (e.g. 1.2.3 vs. 1.2.* will return 0). All other cases return 1 since
+  // components is greater (e.g. 3.2.3 vs 1.*).
+  DCHECK_GT(parsed.size(), 0UL);
+  const size_t min_num_comp = std::min(components_.size(), parsed.size());
+  for (size_t i = 0; i < min_num_comp; ++i) {
+    if (components_[i] != parsed[i])
+      return 1;
+  }
+  return 0;
+}
+
+int Version::CompareTo(const Version& other) const {
+  DCHECK(IsValid());
+  DCHECK(other.IsValid());
+  return CompareVersionComponents(components_, other.components_);
+}
+
+const std::string Version::GetString() const {
+  DCHECK(IsValid());
+  std::string version_str;
+  size_t count = components_.size();
+  for (size_t i = 0; i < count - 1; ++i) {
+    version_str.append(UintToString(components_[i]));
+    version_str.append(".");
+  }
+  version_str.append(UintToString(components_[count - 1]));
+  return version_str;
+}
+
+bool operator==(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) == 0;
+}
+
+bool operator!=(const Version& v1, const Version& v2) {
+  return !(v1 == v2);
+}
+
+bool operator<(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) < 0;
+}
+
+bool operator<=(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) <= 0;
+}
+
+bool operator>(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) > 0;
+}
+
+bool operator>=(const Version& v1, const Version& v2) {
+  return v1.CompareTo(v2) >= 0;
+}
+
+std::ostream& operator<<(std::ostream& stream, const Version& v) {
+  return stream << v.GetString();
+}
+
+}  // namespace base
diff --git a/base/version.h b/base/version.h
new file mode 100644
index 0000000..25b570a
--- /dev/null
+++ b/base/version.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_VERSION_H_
+#define BASE_VERSION_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Version represents a dotted version number, like "1.2.3.4", supporting
+// parsing and comparison.
+class BASE_EXPORT Version {
+ public:
+  // The only thing you can legally do to a default constructed
+  // Version object is assign to it.
+  Version();
+
+  Version(const Version& other);
+
+  ~Version();
+
+  // Initializes from a decimal dotted version number, like "0.1.1".
+  // Each component is limited to a uint32_t. Call IsValid() to learn
+  // the outcome.
+  explicit Version(const std::string& version_str);
+
+  // Returns true if the object contains a valid version number.
+  bool IsValid() const;
+
+  // Returns true if the version wildcard string is valid. The version wildcard
+  // string may end with ".*" (e.g. 1.2.*, 1.*). Any other arrangement with "*"
+  // is invalid (e.g. 1.*.3 or 1.2.3*). This function defaults to standard
+  // Version behavior (IsValid) if no wildcard is present.
+  static bool IsValidWildcardString(const std::string& wildcard_string);
+
+  // Returns -1, 0, 1 for <, ==, >.
+  int CompareTo(const Version& other) const;
+
+  // Given a valid version object, compare if a |wildcard_string| results in a
+  // newer version. This function will default to CompareTo if the string does
+  // not end in wildcard sequence ".*". IsValidWildcardString(wildcard_string)
+  // must be true before using this function.
+  int CompareToWildcardString(const std::string& wildcard_string) const;
+
+  // Return the string representation of this version.
+  const std::string GetString() const;
+
+  const std::vector<uint32_t>& components() const { return components_; }
+
+ private:
+  std::vector<uint32_t> components_;
+};
+
+BASE_EXPORT bool operator==(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator!=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator<=(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>(const Version& v1, const Version& v2);
+BASE_EXPORT bool operator>=(const Version& v1, const Version& v2);
+BASE_EXPORT std::ostream& operator<<(std::ostream& stream, const Version& v);
+
+}  // namespace base
+
+// TODO(xhwang) remove this when all users are updated to explicitly use the
+// namespace
+using base::Version;
+
+#endif  // BASE_VERSION_H_
diff --git a/base/version_unittest.cc b/base/version_unittest.cc
new file mode 100644
index 0000000..5d9ea99
--- /dev/null
+++ b/base/version_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/version.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/macros.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+TEST(VersionTest, DefaultConstructor) {
+  Version v;
+  EXPECT_FALSE(v.IsValid());
+}
+
+TEST(VersionTest, ValueSemantics) {
+  Version v1("1.2.3.4");
+  EXPECT_TRUE(v1.IsValid());
+  Version v3;
+  EXPECT_FALSE(v3.IsValid());
+  {
+    Version v2(v1);
+    v3 = v2;
+    EXPECT_TRUE(v2.IsValid());
+    EXPECT_EQ(v1, v2);
+  }
+  EXPECT_EQ(v3, v1);
+}
+
+TEST(VersionTest, GetVersionFromString) {
+  static const struct version_string {
+    const char* input;
+    size_t parts;
+    uint32_t firstpart;
+    bool success;
+  } cases[] = {
+    {"", 0, 0, false},
+    {" ", 0, 0, false},
+    {"\t", 0, 0, false},
+    {"\n", 0, 0, false},
+    {"  ", 0, 0, false},
+    {".", 0, 0, false},
+    {" . ", 0, 0, false},
+    {"0", 1, 0, true},
+    {"0.", 0, 0, false},
+    {"0.0", 2, 0, true},
+    {"4294967295.0", 2, 4294967295, true},
+    {"4294967296.0", 0, 0, false},
+    {"-1.0", 0, 0, false},
+    {"1.-1.0", 0, 0, false},
+    {"1,--1.0", 0, 0, false},
+    {"+1.0", 0, 0, false},
+    {"1.+1.0", 0, 0, false},
+    {"1+1.0", 0, 0, false},
+    {"++1.0", 0, 0, false},
+    {"1.0a", 0, 0, false},
+    {"1.2.3.4.5.6.7.8.9.0", 10, 1, true},
+    {"02.1", 0, 0, false},
+    {"0.01", 2, 0, true},
+    {"f.1", 0, 0, false},
+    {"15.007.20011", 3, 15, true},
+    {"15.5.28.130162", 4, 15, true},
+  };
+
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    Version version(cases[i].input);
+    EXPECT_EQ(cases[i].success, version.IsValid());
+    if (cases[i].success) {
+      EXPECT_EQ(cases[i].parts, version.components().size());
+      EXPECT_EQ(cases[i].firstpart, version.components()[0]);
+    }
+  }
+}
+
+TEST(VersionTest, Compare) {
+  static const struct version_compare {
+    const char* lhs;
+    const char* rhs;
+    int expected;
+  } cases[] = {
+    {"1.0", "1.0", 0},
+    {"1.0", "0.0", 1},
+    {"1.0", "2.0", -1},
+    {"1.0", "1.1", -1},
+    {"1.1", "1.0", 1},
+    {"1.0", "1.0.1", -1},
+    {"1.1", "1.0.1", 1},
+    {"1.1", "1.0.1", 1},
+    {"1.0.0", "1.0", 0},
+    {"1.0.3", "1.0.20", -1},
+    {"11.0.10", "15.007.20011", -1},
+    {"11.0.10", "15.5.28.130162", -1},
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    Version lhs(cases[i].lhs);
+    Version rhs(cases[i].rhs);
+    EXPECT_EQ(lhs.CompareTo(rhs), cases[i].expected) <<
+        cases[i].lhs << " ? " << cases[i].rhs;
+
+    // Test comparison operators
+    switch (cases[i].expected) {
+    case -1:
+      EXPECT_LT(lhs, rhs);
+      EXPECT_LE(lhs, rhs);
+      EXPECT_NE(lhs, rhs);
+      EXPECT_FALSE(lhs == rhs);
+      EXPECT_FALSE(lhs >= rhs);
+      EXPECT_FALSE(lhs > rhs);
+      break;
+    case 0:
+      EXPECT_FALSE(lhs < rhs);
+      EXPECT_LE(lhs, rhs);
+      EXPECT_FALSE(lhs != rhs);
+      EXPECT_EQ(lhs, rhs);
+      EXPECT_GE(lhs, rhs);
+      EXPECT_FALSE(lhs > rhs);
+      break;
+    case 1:
+      EXPECT_FALSE(lhs < rhs);
+      EXPECT_FALSE(lhs <= rhs);
+      EXPECT_NE(lhs, rhs);
+      EXPECT_FALSE(lhs == rhs);
+      EXPECT_GE(lhs, rhs);
+      EXPECT_GT(lhs, rhs);
+      break;
+    }
+  }
+}
+
+TEST(VersionTest, CompareToWildcardString) {
+  static const struct version_compare {
+    const char* lhs;
+    const char* rhs;
+    int expected;
+  } cases[] = {
+    {"1.0", "1.*", 0},
+    {"1.0", "0.*", 1},
+    {"1.0", "2.*", -1},
+    {"1.2.3", "1.2.3.*", 0},
+    {"10.0", "1.0.*", 1},
+    {"1.0", "3.0.*", -1},
+    {"1.4", "1.3.0.*", 1},
+    {"1.3.9", "1.3.*", 0},
+    {"1.4.1", "1.3.*", 1},
+    {"1.3", "1.4.5.*", -1},
+    {"1.5", "1.4.5.*", 1},
+    {"1.3.9", "1.3.*", 0},
+    {"1.2.0.0.0.0", "1.2.*", 0},
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    const Version version(cases[i].lhs);
+    const int result = version.CompareToWildcardString(cases[i].rhs);
+    EXPECT_EQ(result, cases[i].expected) << cases[i].lhs << "?" << cases[i].rhs;
+  }
+}
+
+TEST(VersionTest, IsValidWildcardString) {
+  static const struct version_compare {
+    const char* version;
+    bool expected;
+  } cases[] = {
+    {"1.0", true},
+    {"", false},
+    {"1.2.3.4.5.6", true},
+    {"1.2.3.*", true},
+    {"1.2.3.5*", false},
+    {"1.2.3.56*", false},
+    {"1.*.3", false},
+    {"20.*", true},
+    {"+2.*", false},
+    {"*", false},
+    {"*.2", false},
+  };
+  for (size_t i = 0; i < arraysize(cases); ++i) {
+    EXPECT_EQ(Version::IsValidWildcardString(cases[i].version),
+        cases[i].expected) << cases[i].version << "?" << cases[i].expected;
+  }
+}
+
+}  // namespace
diff --git a/base/win/scoped_handle_test_dll.cc b/base/win/scoped_handle_test_dll.cc
new file mode 100644
index 0000000..c72e459
--- /dev/null
+++ b/base/win/scoped_handle_test_dll.cc
@@ -0,0 +1,125 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+#include <vector>
+
+#include "base/win/base_features.h"
+#include "base/win/current_module.h"
+#include "base/win/scoped_handle.h"
+
+namespace base {
+namespace win {
+namespace testing {
+
+extern "C" bool __declspec(dllexport) RunTest();
+
+namespace {
+
+struct ThreadParams {
+  HANDLE ready_event;
+  HANDLE start_event;
+};
+
+// Note, this must use all native functions to avoid instantiating the
+// ActiveVerifier. e.g. can't use base::Thread or even base::PlatformThread.
+DWORD __stdcall ThreadFunc(void* params) {
+  ThreadParams* thread_params = reinterpret_cast<ThreadParams*>(params);
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+
+  ::SetEvent(thread_params->ready_event);
+  ::WaitForSingleObject(thread_params->start_event, INFINITE);
+  ScopedHandle handle_holder(handle);
+  return 0;
+}
+
+bool InternalRunThreadTest() {
+  std::vector<HANDLE> threads_;
+  // From manual testing, the bug fixed by crrev.com/678736a starts reliably
+  // causing handle verifier asserts to trigger at around 100 threads, so make
+  // it 200 to be sure to detect any future regressions.
+  const size_t kNumThreads = 200;
+
+  // bManualReset is set to true to allow signalling multiple threads.
+  HANDLE start_event = ::CreateEvent(nullptr, true, false, nullptr);
+  if (!start_event)
+    return false;
+
+  HANDLE ready_event = CreateEvent(nullptr, false, false, nullptr);
+  if (!ready_event)
+    return false;
+
+  ThreadParams thread_params = { ready_event, start_event };
+
+  for (size_t i = 0; i < kNumThreads; i++) {
+    HANDLE thread_handle =
+        ::CreateThread(nullptr, 0, ThreadFunc,
+                       reinterpret_cast<void*>(&thread_params), 0, nullptr);
+    if (!thread_handle)
+      break;
+    ::WaitForSingleObject(ready_event, INFINITE);
+    threads_.push_back(thread_handle);
+  }
+
+  ::CloseHandle(ready_event);
+
+  if (threads_.size() != kNumThreads) {
+    for (const auto& thread : threads_)
+      ::CloseHandle(thread);
+    ::CloseHandle(start_event);
+    return false;
+  }
+
+  ::SetEvent(start_event);
+  ::CloseHandle(start_event);
+  for (const auto& thread : threads_) {
+    ::WaitForSingleObject(thread, INFINITE);
+    ::CloseHandle(thread);
+  }
+
+  return true;
+}
+
+bool InternalRunLocationTest() {
+  // Create a new handle and then set LastError again.
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+  if (!handle)
+    return false;
+  ScopedHandle handle_holder(handle);
+
+  HMODULE verifier_module = GetHandleVerifierModuleForTesting();
+  if (!verifier_module)
+    return false;
+
+  // Get my module
+  HMODULE my_module = CURRENT_MODULE();
+  if (!my_module)
+    return false;
+
+  HMODULE main_module = ::GetModuleHandle(NULL);
+
+#if BUILDFLAG(SINGLE_MODULE_MODE_HANDLE_VERIFIER)
+  // In a component build ActiveVerifier will always be created inside base.dll
+  // as the code always lives there.
+  if (verifier_module == my_module || verifier_module == main_module)
+    return false;
+#else
+  // In a non-component build, ActiveVerifier should always be created in the
+  // version of base linked with the main executable.
+  if (verifier_module == my_module || verifier_module != main_module)
+    return false;
+#endif
+  return true;
+}
+
+}  // namespace
+
+bool RunTest() {
+  return InternalRunThreadTest() && InternalRunLocationTest();
+}
+
+}  // testing
+}  // win
+}  // base
diff --git a/base/win/windows_version_unittest.cc b/base/win/windows_version_unittest.cc
new file mode 100644
index 0000000..f0d6d96
--- /dev/null
+++ b/base/win/windows_version_unittest.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/windows_version.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+namespace {
+
+TEST(WindowsVersion, GetVersionExAndKernelVersionMatch) {
+  // If this fails, we're running in compatibility mode, or need to update the
+  // application manifest.
+  EXPECT_EQ(OSInfo::GetInstance()->version(),
+            OSInfo::GetInstance()->Kernel32Version());
+}
+
+}  // namespace
+}  // namespace win
+}  // namespace base
diff --git a/build/build_config.h b/build/build_config.h
index e152a66..80a93d3 100644
--- a/build/build_config.h
+++ b/build/build_config.h
@@ -159,7 +159,7 @@
 #define ARCH_CPU_LITTLE_ENDIAN 1
 #elif defined(__MIPSEL__)
 #if defined(__LP64__)
-#define ARCH_CPU_MIPS64_FAMILY 1
+#define ARCH_CPU_MIPS_FAMILY 1
 #define ARCH_CPU_MIPS64EL 1
 #define ARCH_CPU_64_BITS 1
 #define ARCH_CPU_LITTLE_ENDIAN 1
diff --git a/build/buildflag.h b/build/buildflag.h
new file mode 100644
index 0000000..5776a75
--- /dev/null
+++ b/build/buildflag.h
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BUILD_BUILDFLAG_H_
+#define BUILD_BUILDFLAG_H_
+
+// These macros un-mangle the names of the build flags in a way that looks
+// natural, and gives errors if the flag is not defined. Normally in the
+// preprocessor it's easy to make mistakes that interpret "you haven't done
+// the setup to know what the flag is" as "flag is off". Normally you would
+// include the generated header rather than include this file directly.
+//
+// This is for use with generated headers. See build/buildflag_header.gni.
+
+// This dance of two macros does a concatenation of two preprocessor args using
+// ## doubly indirectly because using ## directly prevents macros in that
+// parameter from being expanded.
+#define BUILDFLAG_CAT_INDIRECT(a, b) a ## b
+#define BUILDFLAG_CAT(a, b) BUILDFLAG_CAT_INDIRECT(a, b)
+
+// Accessor for build flags.
+//
+// To test for a value, if the build file specifies:
+//
+//   ENABLE_FOO=true
+//
+// Then you would check at build-time in source code with:
+//
+//   #include "foo_flags.h"  // The header the build file specified.
+//
+//   #if BUILDFLAG(ENABLE_FOO)
+//     ...
+//   #endif
+//
+// There will be no #define called ENABLE_FOO so if you accidentally test for
+// whether that is defined, it will always be negative. You can also use
+// the value in expressions:
+//
+//   const char kSpamServerName[] = BUILDFLAG(SPAM_SERVER_NAME);
+//
+// Because the flag is accessed as a preprocessor macro with (), an error
+// will be thrown if the proper header defining the internal flag value has
+// not been included.
+#define BUILDFLAG(flag) (BUILDFLAG_CAT(BUILDFLAG_INTERNAL_, flag)())
+
+#endif  // BUILD_BUILDFLAG_H_
diff --git a/components/timers/BUILD.gn b/components/timers/BUILD.gn
index d6a7efb..c6f4a12 100644
--- a/components/timers/BUILD.gn
+++ b/components/timers/BUILD.gn
@@ -12,3 +12,17 @@
     "//base",
   ]
 }
+
+source_set("unit_tests") {
+  testonly = true
+
+  sources = [
+    "alarm_timer_unittest.cc",
+  ]
+
+  deps = [
+    ":timers",
+    "//base",
+    "//testing/gtest",
+  ]
+}
diff --git a/components/timers/alarm_timer_chromeos.cc b/components/timers/alarm_timer_chromeos.cc
index ae14870..3f1abbf 100644
--- a/components/timers/alarm_timer_chromeos.cc
+++ b/components/timers/alarm_timer_chromeos.cc
@@ -16,8 +16,8 @@
 #include "base/macros.h"
 #include "base/message_loop/message_loop.h"
 #include "base/pending_task.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
 
 namespace timers {
@@ -145,7 +145,7 @@
   base::Closure on_timer_fired_callback_for_test_;
 
   // Manages watching file descriptors.
-  scoped_ptr<base::MessageLoopForIO::FileDescriptorWatcher> fd_watcher_;
+  std::unique_ptr<base::MessageLoopForIO::FileDescriptorWatcher> fd_watcher_;
 
   // The sequence numbers of the last Reset() call handled respectively on
   // |origin_task_runner_| and on the MessageLoopForIO used for watching the
@@ -229,8 +229,12 @@
 
   // Now clear the timer.
   DCHECK_NE(alarm_fd_, -1);
+#if defined(ANDROID)
   itimerspec blank_time;
   memset(&blank_time, 0, sizeof(blank_time));
+#else
+  itimerspec blank_time = {};
+#endif  // defined(ANDROID)
   if (timerfd_settime(alarm_fd_, 0, &blank_time, NULL) < 0)
     PLOG(ERROR) << "Unable to clear alarm time.  Timer may still fire.";
 }
@@ -254,7 +258,7 @@
   }
 }
 
-void AlarmTimer::Delegate::OnFileCanWriteWithoutBlocking(int /* fd */) {
+void AlarmTimer::Delegate::OnFileCanWriteWithoutBlocking(int /*fd*/) {
   NOTREACHED();
 }
 
@@ -286,8 +290,12 @@
 
   // Actually set the timer.  This will also clear the pre-existing timer, if
   // any.
+#if defined(ANDROID)
   itimerspec alarm_time;
   memset(&alarm_time, 0, sizeof(alarm_time));
+#else
+  itimerspec alarm_time = {};
+#endif  // defined(ANDROID)
   alarm_time.it_value.tv_sec = delay.InSeconds();
   alarm_time.it_value.tv_nsec =
       (delay.InMicroseconds() % base::Time::kMicrosecondsPerSecond) *
@@ -428,7 +436,8 @@
 
   // Take ownership of the pending user task, which is going to be cleared by
   // the Stop() or Reset() functions below.
-  scoped_ptr<base::PendingTask> pending_user_task(std::move(pending_task_));
+  std::unique_ptr<base::PendingTask> pending_user_task(
+      std::move(pending_task_));
 
   // Re-schedule or stop the timer as requested.
   if (base::Timer::is_repeating())
diff --git a/components/timers/alarm_timer_chromeos.h b/components/timers/alarm_timer_chromeos.h
index 2f6b0ff..313c9f9 100644
--- a/components/timers/alarm_timer_chromeos.h
+++ b/components/timers/alarm_timer_chromeos.h
@@ -5,6 +5,8 @@
 #ifndef COMPONENTS_TIMERS_ALARM_TIMER_CHROMEOS_H_
 #define COMPONENTS_TIMERS_ALARM_TIMER_CHROMEOS_H_
 
+#include <memory>
+
 #include "base/callback.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
@@ -67,7 +69,7 @@
 
   // Keeps track of the user task we want to run.  A new one is constructed
   // every time Reset() is called.
-  scoped_ptr<base::PendingTask> pending_task_;
+  std::unique_ptr<base::PendingTask> pending_task_;
 
   // Tracks whether the timer has the ability to wake the system up from
   // suspend.  This is a runtime check because we won't know if the system
@@ -82,7 +84,7 @@
   // Observes |origin_message_loop_| and informs this class if it will be
   // destroyed.
   class MessageLoopObserver;
-  scoped_ptr<MessageLoopObserver> message_loop_observer_;
+  std::unique_ptr<MessageLoopObserver> message_loop_observer_;
 
   base::WeakPtrFactory<AlarmTimer> weak_factory_;
 
diff --git a/crypto/BUILD.gn b/crypto/BUILD.gn
index 8d24e60..a912d93 100644
--- a/crypto/BUILD.gn
+++ b/crypto/BUILD.gn
@@ -8,40 +8,31 @@
 component("crypto") {
   output_name = "crcrypto"  # Avoid colliding with OpenSSL's libcrypto.
   sources = [
-    "aead_openssl.cc",
-    "aead_openssl.h",
+    "aead.cc",
+    "aead.h",
     "apple_keychain.h",
     "apple_keychain_ios.mm",
     "apple_keychain_mac.mm",
+    "auto_cbb.h",
     "capi_util.cc",
     "capi_util.h",
     "crypto_export.h",
     "cssm_init.cc",
     "cssm_init.h",
-    "curve25519-donna.c",
+    "curve25519.cc",
     "curve25519.h",
-    "curve25519_nss.cc",
-    "curve25519_openssl.cc",
+    "ec_private_key.cc",
     "ec_private_key.h",
-    "ec_private_key_nss.cc",
-    "ec_private_key_openssl.cc",
     "ec_signature_creator.cc",
     "ec_signature_creator.h",
+    "ec_signature_creator_impl.cc",
     "ec_signature_creator_impl.h",
-    "ec_signature_creator_nss.cc",
-    "ec_signature_creator_openssl.cc",
     "encryptor.cc",
     "encryptor.h",
-    "encryptor_nss.cc",
-    "encryptor_openssl.cc",
-    "ghash.cc",
-    "ghash.h",
     "hkdf.cc",
     "hkdf.h",
     "hmac.cc",
     "hmac.h",
-    "hmac_nss.cc",
-    "hmac_openssl.cc",
     "mac_security_services_lock.cc",
     "mac_security_services_lock.h",
 
@@ -68,34 +59,20 @@
     "random.h",
     "rsa_private_key.cc",
     "rsa_private_key.h",
-    "rsa_private_key_nss.cc",
-    "rsa_private_key_openssl.cc",
     "scoped_capi_types.h",
     "scoped_nss_types.h",
+    "secure_hash.cc",
     "secure_hash.h",
-    "secure_hash_default.cc",
-    "secure_hash_openssl.cc",
     "secure_util.cc",
     "secure_util.h",
     "sha2.cc",
     "sha2.h",
+    "signature_creator.cc",
     "signature_creator.h",
-    "signature_creator_nss.cc",
-    "signature_creator_openssl.cc",
+    "signature_verifier.cc",
     "signature_verifier.h",
-    "signature_verifier_nss.cc",
-    "signature_verifier_openssl.cc",
+    "symmetric_key.cc",
     "symmetric_key.h",
-    "symmetric_key_nss.cc",
-    "symmetric_key_openssl.cc",
-    "third_party/nss/chromium-blapi.h",
-    "third_party/nss/chromium-blapit.h",
-    "third_party/nss/chromium-nss.h",
-    "third_party/nss/chromium-sha256.h",
-    "third_party/nss/pk11akey.cc",
-    "third_party/nss/rsawrapr.c",
-    "third_party/nss/secsign.cc",
-    "third_party/nss/sha512.cc",
   ]
 
   # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
@@ -113,6 +90,11 @@
       "mock_apple_keychain.cc",
       "mock_apple_keychain.h",
     ]
+  } else {
+    libs = [
+      "CoreFoundation.framework",
+      "Security.framework",
+    ]
   }
 
   if (!is_mac) {
@@ -130,56 +112,8 @@
     ]
   }
 
-  if (is_android) {
-    deps += [ "//third_party/android_tools:cpu_features" ]
-  }
-
-  if (use_openssl) {
-    # Remove NSS files when using OpenSSL
-    sources -= [
-      "curve25519-donna.c",
-      "curve25519_nss.cc",
-      "ec_private_key_nss.cc",
-      "ec_signature_creator_nss.cc",
-      "encryptor_nss.cc",
-      "hmac_nss.cc",
-      "rsa_private_key_nss.cc",
-      "secure_hash_default.cc",
-      "signature_creator_nss.cc",
-      "signature_verifier_nss.cc",
-      "symmetric_key_nss.cc",
-      "third_party/nss/chromium-blapi.h",
-      "third_party/nss/chromium-blapit.h",
-      "third_party/nss/chromium-nss.h",
-      "third_party/nss/pk11akey.cc",
-      "third_party/nss/rsawrapr.c",
-      "third_party/nss/secsign.cc",
-    ]
-  } else {
-    # Remove OpenSSL when using NSS.
-    sources -= [
-      "aead_openssl.cc",
-      "aead_openssl.h",
-      "curve25519_openssl.cc",
-      "ec_private_key_openssl.cc",
-      "ec_signature_creator_openssl.cc",
-      "encryptor_openssl.cc",
-      "hmac_openssl.cc",
-      "openssl_bio_string.cc",
-      "openssl_bio_string.h",
-      "openssl_util.cc",
-      "openssl_util.h",
-      "rsa_private_key_openssl.cc",
-      "secure_hash_openssl.cc",
-      "signature_creator_openssl.cc",
-      "signature_verifier_openssl.cc",
-      "symmetric_key_openssl.cc",
-    ]
-  }
-
-  # Some files are built when NSS is used at all, either for the internal crypto
-  # library or the platform certificate library.
-  if (use_openssl && !use_nss_certs) {
+  # Some files are built when NSS is used for the platform certificate library.
+  if (!use_nss_certs) {
     sources -= [
       "nss_key_util.cc",
       "nss_key_util.h",
@@ -196,55 +130,13 @@
   }
 }
 
-# TODO(GYP): TODO(dpranke), fix the compile errors for this stuff
-# and make it work.
-if (false && is_win) {
-  # A minimal crypto subset for hmac-related stuff that small standalone
-  # targets can use to reduce code size on Windows. This does not depend on
-  # OpenSSL/NSS but will use Windows APIs for that functionality.
-  source_set("crypto_minimal_win") {
-    sources = [
-      "crypto_export.h",
-      "hmac.cc",
-      "hmac.h",
-      "hmac_win.cc",
-      "scoped_capi_types.h",
-      "scoped_nss_types.h",
-      "secure_util.cc",
-      "secure_util.h",
-      "symmetric_key.h",
-      "symmetric_key_win.cc",
-      "third_party/nss/chromium-blapi.h",
-      "third_party/nss/chromium-sha256.h",
-      "third_party/nss/sha512.cc",
-    ]
-
-    deps = [
-      "//base",
-      "//base/third_party/dynamic_annotations",
-    ]
-
-    defines = [ "CRYPTO_IMPLEMENTATION" ]
-  }
-}
-
-# TODO(GYP): Delete this after we've converted everything to GN.
-# The _run targets exist only for compatibility w/ GYP.
-group("crypto_unittests_run") {
-  testonly = true
-  deps = [
-    ":crypto_unittests",
-  ]
-}
-
 test("crypto_unittests") {
   sources = [
-    "aead_openssl_unittest.cc",
+    "aead_unittest.cc",
     "curve25519_unittest.cc",
     "ec_private_key_unittest.cc",
     "ec_signature_creator_unittest.cc",
     "encryptor_unittest.cc",
-    "ghash_unittest.cc",
     "hkdf_unittest.cc",
     "hmac_unittest.cc",
     "nss_key_util_unittest.cc",
@@ -261,19 +153,14 @@
     "symmetric_key_unittest.cc",
   ]
 
-  # Some files are built when NSS is used at all, either for the internal crypto
-  # library or the platform certificate library.
-  if (use_openssl && !use_nss_certs) {
+  # Some files are built when NSS is used for the platform certificate library.
+  if (!use_nss_certs) {
     sources -= [
       "nss_key_util_unittest.cc",
       "nss_util_unittest.cc",
     ]
   }
 
-  if (!use_openssl) {
-    sources -= [ "openssl_bio_string_unittest.cc" ]
-  }
-
   configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
 
   deps = [
@@ -288,40 +175,36 @@
   ]
 }
 
+# This has no sources in some cases so can't be a static library.
 source_set("test_support") {
-  sources = [
-    "scoped_test_nss_chromeos_user.cc",
-    "scoped_test_nss_chromeos_user.h",
-    "scoped_test_nss_db.cc",
-    "scoped_test_nss_db.h",
-    "scoped_test_system_nss_key_slot.cc",
-    "scoped_test_system_nss_key_slot.h",
-  ]
-  deps = [
-    ":crypto",
-    ":platform",
-    "//base",
-  ]
+  testonly = true
+  sources = []
 
-  if (!use_nss_certs) {
-    sources -= [
+  if (use_nss_certs) {
+    sources += [
       "scoped_test_nss_db.cc",
       "scoped_test_nss_db.h",
     ]
   }
 
-  if (!is_chromeos) {
-    sources -= [
+  if (is_chromeos) {
+    sources += [
       "scoped_test_nss_chromeos_user.cc",
       "scoped_test_nss_chromeos_user.h",
       "scoped_test_system_nss_key_slot.cc",
       "scoped_test_system_nss_key_slot.h",
     ]
   }
+
+  deps = [
+    ":crypto",
+    ":platform",
+    "//base",
+  ]
 }
 
 config("platform_config") {
-  if ((!use_openssl || use_nss_certs) && is_clang) {
+  if (use_nss_certs && is_clang) {
     # There is a broken header guard in /usr/include/nss/secmod.h:
     # https://bugzilla.mozilla.org/show_bug.cgi?id=884072
     cflags = [ "-Wno-header-guard" ]
@@ -332,40 +215,14 @@
 # according to the state of the crypto flags. A target just wanting to depend
 # on the current SSL library should just depend on this.
 group("platform") {
-  if (use_openssl) {
-    public_deps = [
-      "//third_party/boringssl",
-    ]
-  } else {
-    public_deps = [
-      "//net/third_party/nss/ssl:libssl",
-    ]
-  }
+  public_deps = [
+    "//third_party/boringssl",
+  ]
 
-  # Link in NSS if it is used for either the internal crypto library
-  # (!use_openssl) or platform certificate library (use_nss_certs).
-  if (!use_openssl || use_nss_certs) {
-    if (is_linux) {
-      # On Linux, we use the system NSS (excepting SSL where we always use our
-      # own).
-      public_configs = [ ":platform_config" ]
-      if (!use_openssl) {
-        # If using a bundled copy of NSS's SSL library, ensure the bundled SSL
-        # header search path comes before the system one so our versions are
-        # used. The libssl target will add the search path we want, but
-        # according to GN's ordering rules, public_configs' search path will get
-        # applied before ones inherited from our dependencies.  Therefore, we
-        # need to explicitly list our custom libssl's config here before the
-        # system one.
-        public_configs += [ "//net/third_party/nss/ssl:ssl_config" ]
-      }
-      public_configs += [ "//third_party/nss:system_nss_no_ssl_config" ]
-    } else {
-      # Non-Linux platforms use the hermetic NSS from the tree.
-      public_deps += [
-        "//third_party/nss:nspr",
-        "//third_party/nss:nss",
-      ]
-    }
+  # Link in NSS if it is used for the platform certificate library
+  # (use_nss_certs).
+  if (use_nss_certs) {
+    public_configs = [ ":platform_config" ]
+    public_configs += [ "//third_party/nss:system_nss_no_ssl_config" ]
   }
 }
diff --git a/crypto/auto_cbb.h b/crypto/auto_cbb.h
new file mode 100644
index 0000000..5206a21
--- /dev/null
+++ b/crypto/auto_cbb.h
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_AUTO_CBB_H_
+#define CRYPTO_AUTO_CBB_H_
+
+#include <openssl/bytestring.h>
+
+#include "base/macros.h"
+
+namespace crypto {
+
+// AutoCBB is a wrapper over OpenSSL's CBB type that automatically releases
+// resources when going out of scope.
+class AutoCBB {
+ public:
+  AutoCBB() { CBB_zero(&cbb_); }
+  ~AutoCBB() { CBB_cleanup(&cbb_); }
+
+  CBB* get() { return &cbb_; }
+
+  void Reset() {
+    CBB_cleanup(&cbb_);
+    CBB_zero(&cbb_);
+  }
+
+ private:
+  CBB cbb_;
+  DISALLOW_COPY_AND_ASSIGN(AutoCBB);
+};
+
+}  // namespace crypto
+
+#endif  // CRYPTO_AUTO_CBB_H_
diff --git a/crypto/crypto.gyp b/crypto/crypto.gyp
index 2590c4f..8ed2ab2 100644
--- a/crypto/crypto.gyp
+++ b/crypto/crypto.gyp
@@ -17,6 +17,7 @@
       'dependencies': [
         '../base/base.gyp:base',
         '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+        '../third_party/boringssl/boringssl.gyp:boringssl',
       ],
       'defines': [
         'CRYPTO_IMPLEMENTATION',
@@ -24,10 +25,10 @@
       'conditions': [
         [ 'os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
           'dependencies': [
-            '../build/linux/system.gyp:ssl',
+            '../build/linux/system.gyp:nss',
           ],
           'export_dependent_settings': [
-            '../build/linux/system.gyp:ssl',
+            '../build/linux/system.gyp:nss',
           ],
           'conditions': [
             [ 'chromeos==1', {
@@ -35,11 +36,6 @@
               },
             ],
           ],
-        }, {  # os_posix != 1 or OS == "mac" or OS == "ios" or OS == "android"
-            'sources!': [
-              'hmac_win.cc',
-              'symmetric_key_win.cc',
-            ],
         }],
         [ 'OS != "mac" and OS != "ios"', {
           'sources!': [
@@ -48,11 +44,6 @@
             'mock_apple_keychain.h',
           ],
         }],
-        [ 'OS == "android"', {
-          'dependencies': [
-            '../build/android/ndk.gyp:cpu_features',
-          ],
-        }],
         [ 'os_bsd==1', {
           'link_settings': {
             'libraries': [
@@ -75,16 +66,6 @@
             'mac_security_services_lock.h',
           ],
         }],
-        [ 'use_openssl == 0 and (OS == "mac" or OS == "ios" or OS == "win")', {
-          'dependencies': [
-            '../third_party/nss/nss.gyp:nspr',
-            '../third_party/nss/nss.gyp:nss',
-          ],
-          'export_dependent_settings': [
-            '../third_party/nss/nss.gyp:nspr',
-            '../third_party/nss/nss.gyp:nss',
-          ],
-        }],
         [ 'OS != "win"', {
           'sources!': [
             'capi_util.h',
@@ -96,57 +77,8 @@
             4267,  # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
           ],
         }],
-        [ 'use_openssl==1', {
-            'dependencies': [
-              '../third_party/boringssl/boringssl.gyp:boringssl',
-            ],
-            # TODO(joth): Use a glob to match exclude patterns once the
-            #             OpenSSL file set is complete.
-            'sources!': [
-              'curve25519-donna.c',
-              'curve25519_nss.cc',
-              'ec_private_key_nss.cc',
-              'ec_signature_creator_nss.cc',
-              'encryptor_nss.cc',
-              'hmac_nss.cc',
-              'rsa_private_key_nss.cc',
-              'secure_hash_default.cc',
-              'signature_creator_nss.cc',
-              'signature_verifier_nss.cc',
-              'symmetric_key_nss.cc',
-              'third_party/nss/chromium-blapi.h',
-              'third_party/nss/chromium-blapit.h',
-              'third_party/nss/chromium-nss.h',
-              'third_party/nss/chromium-prtypes.h',
-              'third_party/nss/chromium-sha256.h',
-              'third_party/nss/pk11akey.cc',
-              'third_party/nss/rsawrapr.c',
-              'third_party/nss/secsign.cc',
-              'third_party/nss/sha512.cc',
-            ],
-          }, {
-            'sources!': [
-              'aead_openssl.cc',
-              'aead_openssl.h',
-              'curve25519_openssl.cc',
-              'ec_private_key_openssl.cc',
-              'ec_signature_creator_openssl.cc',
-              'encryptor_openssl.cc',
-              'hmac_openssl.cc',
-              'openssl_bio_string.cc',
-              'openssl_bio_string.h',
-              'openssl_util.cc',
-              'openssl_util.h',
-              'rsa_private_key_openssl.cc',
-              'secure_hash_openssl.cc',
-              'signature_creator_openssl.cc',
-              'signature_verifier_openssl.cc',
-              'symmetric_key_openssl.cc',
-            ],
-        },],
-        [ 'use_openssl==1 and use_nss_certs==0', {
-            # Some files are built when NSS is used at all, either for the
-            # internal crypto library or the platform certificate library.
+        [ 'use_nss_certs==0', {
+            # Some files are built when NSS is used for the platform certificate library.
             'sources!': [
               'nss_key_util.cc',
               'nss_key_util.h',
@@ -164,12 +96,11 @@
       'target_name': 'crypto_unittests',
       'type': 'executable',
       'sources': [
-        'aead_openssl_unittest.cc',
+        'aead_unittest.cc',
         'curve25519_unittest.cc',
         'ec_private_key_unittest.cc',
         'ec_signature_creator_unittest.cc',
         'encryptor_unittest.cc',
-        'ghash_unittest.cc',
         'hkdf_unittest.cc',
         'hmac_unittest.cc',
         'nss_key_util_unittest.cc',
@@ -193,47 +124,25 @@
         '../base/base.gyp:test_support_base',
         '../testing/gmock.gyp:gmock',
         '../testing/gtest.gyp:gtest',
+        '../third_party/boringssl/boringssl.gyp:boringssl',
       ],
       'conditions': [
         [ 'use_nss_certs == 1', {
-          'conditions': [
-            [ 'use_allocator!="none"', {
-                'dependencies': [
-                  '../base/allocator/allocator.gyp:allocator',
-                ],
-              },
-            ],
-          ],
           'dependencies': [
-            '../build/linux/system.gyp:ssl',
+            '../build/linux/system.gyp:nss',
           ],
         }],
-        [ 'use_openssl == 1 and use_nss_certs == 0', {
-          # Some files are built when NSS is used at all, either for the
-          # internal crypto library or the platform certificate library.
+        [ 'use_nss_certs == 0', {
+          # Some files are built when NSS is used for the platform certificate library.
           'sources!': [
             'nss_key_util_unittest.cc',
             'nss_util_unittest.cc',
           ],
         }],
-        [ 'use_openssl == 0 and (OS == "mac" or OS == "ios" or OS == "win")', {
-          'dependencies': [
-            '../third_party/nss/nss.gyp:nspr',
-          ],
-        }],
         [ 'OS == "win"', {
           # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
           'msvs_disabled_warnings': [4267, ],
         }],
-        [ 'use_openssl==1', {
-          'dependencies': [
-            '../third_party/boringssl/boringssl.gyp:boringssl',
-          ],
-        }, {
-          'sources!': [
-            'openssl_bio_string_unittest.cc',
-          ],
-        }],
       ],
     },
   ],
@@ -242,13 +151,12 @@
       'targets': [
         {
           'target_name': 'crypto_nacl_win64',
-          # We do not want nacl_helper to depend on NSS because this would
-          # require including a 64-bit copy of NSS. Thus, use the native APIs
-          # for the helper.
+          # We use the native APIs for the helper.
           'type': '<(component)',
           'dependencies': [
             '../base/base.gyp:base_win64',
             '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
+            '../third_party/boringssl/boringssl.gyp:boringssl_nacl_win64',
           ],
           'sources': [
             '<@(nacl_win64_sources)',
diff --git a/crypto/crypto.gypi b/crypto/crypto.gypi
index e5cc4f4..dadc0ea 100644
--- a/crypto/crypto.gypi
+++ b/crypto/crypto.gypi
@@ -8,57 +8,43 @@
     # This is required so that we can build them for nacl win64.
     'variables': {
       'hmac_win64_related_sources': [
+        'crypto_export.h',
         'hmac.cc',
         'hmac.h',
-        'hmac_win.cc',
+        'openssl_util.cc',
+        'openssl_util.h',
         'secure_util.cc',
         'secure_util.h',
+        'symmetric_key.cc',
         'symmetric_key.h',
-        'symmetric_key_win.cc',
-        'third_party/nss/chromium-blapi.h',
-        'third_party/nss/chromium-blapit.h',
-        'third_party/nss/chromium-prtypes.h',
-        'third_party/nss/chromium-sha256.h',
-        'third_party/nss/sha512.cc',
-        'wincrypt_shim.h',
       ],
     },
     'crypto_sources': [
       # NOTE: all transitive dependencies of HMAC on windows need
       #     to be placed in the source list above.
       '<@(hmac_win64_related_sources)',
-      'aead_openssl.cc',
-      'aead_openssl.h',
+      'aead.cc',
+      'aead.h',
       'apple_keychain.h',
       'apple_keychain_ios.mm',
       'apple_keychain_mac.mm',
+      'auto_cbb.h',
       'capi_util.cc',
       'capi_util.h',
-      'crypto_export.h',
       'cssm_init.cc',
       'cssm_init.h',
-      'curve25519-donna.c',
+      'curve25519.cc',
       'curve25519.h',
-      'curve25519_nss.cc',
-      'curve25519_openssl.cc',
-      'ghash.cc',
-      'ghash.h',
+      'ec_private_key.cc',
       'ec_private_key.h',
-      'ec_private_key_nss.cc',
-      'ec_private_key_openssl.cc',
       'ec_signature_creator.cc',
       'ec_signature_creator.h',
+      'ec_signature_creator_impl.cc',
       'ec_signature_creator_impl.h',
-      'ec_signature_creator_nss.cc',
-      'ec_signature_creator_openssl.cc',
       'encryptor.cc',
       'encryptor.h',
-      'encryptor_nss.cc',
-      'encryptor_openssl.cc',
       'hkdf.cc',
       'hkdf.h',
-      'hmac_nss.cc',
-      'hmac_openssl.cc',
       'mac_security_services_lock.cc',
       'mac_security_services_lock.h',
       'mock_apple_keychain.cc',
@@ -75,35 +61,23 @@
       'nss_util_internal.h',
       'openssl_bio_string.cc',
       'openssl_bio_string.h',
-      'openssl_util.cc',
-      'openssl_util.h',
       'p224.cc',
       'p224.h',
       'random.h',
       'random.cc',
       'rsa_private_key.cc',
       'rsa_private_key.h',
-      'rsa_private_key_nss.cc',
-      'rsa_private_key_openssl.cc',
       'scoped_capi_types.h',
       'scoped_nss_types.h',
+      'secure_hash.cc',
       'secure_hash.h',
-      'secure_hash_default.cc',
-      'secure_hash_openssl.cc',
       'sha2.cc',
       'sha2.h',
+      'signature_creator.cc',
       'signature_creator.h',
-      'signature_creator_nss.cc',
-      'signature_creator_openssl.cc',
+      'signature_verifier.cc',
       'signature_verifier.h',
-      'signature_verifier_nss.cc',
-      'signature_verifier_openssl.cc',
-      'symmetric_key_nss.cc',
-      'symmetric_key_openssl.cc',
-      'third_party/nss/chromium-nss.h',
-      'third_party/nss/pk11akey.cc',
-      'third_party/nss/rsawrapr.c',
-      'third_party/nss/secsign.cc',
+      'wincrypt_shim.h',
     ],
     'nacl_win64_sources': [
       '<@(hmac_win64_related_sources)',
diff --git a/crypto/crypto_nacl.gyp b/crypto/crypto_nacl.gyp
index 255c42c..c7c01a8 100644
--- a/crypto/crypto_nacl.gyp
+++ b/crypto/crypto_nacl.gyp
@@ -38,8 +38,6 @@
         ['exclude', '^cssm_'],
         ['exclude', '^nss_'],
         ['exclude', '^mac_'],
-        ['exclude', '^third_party/nss/'],
-        ['include', '^third_party/nss/sha512.cc'],
       ],
     },
   ],
diff --git a/crypto/curve25519-donna.c b/crypto/curve25519-donna.c
deleted file mode 100644
index f141ac0..0000000
--- a/crypto/curve25519-donna.c
+++ /dev/null
@@ -1,592 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-/*
- * curve25519-donna: Curve25519 elliptic curve, public key function
- *
- * http://code.google.com/p/curve25519-donna/
- *
- * Adam Langley <agl@imperialviolet.org>
- *
- * Derived from public domain C code by Daniel J. Bernstein <djb@cr.yp.to>
- *
- * More information about curve25519 can be found here
- *   http://cr.yp.to/ecdh.html
- *
- * djb's sample implementation of curve25519 is written in a special assembly
- * language called qhasm and uses the floating point registers.
- *
- * This is, almost, a clean room reimplementation from the curve25519 paper. It
- * uses many of the tricks described therein. Only the crecip function is taken
- * from the sample implementation.
- */
-
-#include <string.h>
-#include <stdint.h>
-
-typedef uint8_t u8;
-typedef int32_t s32;
-typedef int64_t limb;
-
-/* Field element representation:
- *
- * Field elements are written as an array of signed, 64-bit limbs, least
- * significant first. The value of the field element is:
- *   x[0] + 2^26·x[1] + x^51·x[2] + 2^102·x[3] + ...
- *
- * i.e. the limbs are 26, 25, 26, 25, ... bits wide.
- */
-
-/* Sum two numbers: output += in */
-static void fsum(limb *output, const limb *in) {
-  unsigned i;
-  for (i = 0; i < 10; i += 2) {
-    output[0+i] = (output[0+i] + in[0+i]);
-    output[1+i] = (output[1+i] + in[1+i]);
-  }
-}
-
-/* Find the difference of two numbers: output = in - output
- * (note the order of the arguments!)
- */
-static void fdifference(limb *output, const limb *in) {
-  unsigned i;
-  for (i = 0; i < 10; ++i) {
-    output[i] = (in[i] - output[i]);
-  }
-}
-
-/* Multiply a number my a scalar: output = in * scalar */
-static void fscalar_product(limb *output, const limb *in, const limb scalar) {
-  unsigned i;
-  for (i = 0; i < 10; ++i) {
-    output[i] = in[i] * scalar;
-  }
-}
-
-/* Multiply two numbers: output = in2 * in
- *
- * output must be distinct to both inputs. The inputs are reduced coefficient
- * form, the output is not.
- */
-static void fproduct(limb *output, const limb *in2, const limb *in) {
-  output[0] =       ((limb) ((s32) in2[0])) * ((s32) in[0]);
-  output[1] =       ((limb) ((s32) in2[0])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[0]);
-  output[2] =  2 *  ((limb) ((s32) in2[1])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[0]);
-  output[3] =       ((limb) ((s32) in2[1])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[0]);
-  output[4] =       ((limb) ((s32) in2[2])) * ((s32) in[2]) +
-               2 * (((limb) ((s32) in2[1])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[1])) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[0]);
-  output[5] =       ((limb) ((s32) in2[2])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[0]);
-  output[6] =  2 * (((limb) ((s32) in2[3])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[1])) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[0]);
-  output[7] =       ((limb) ((s32) in2[3])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[0]);
-  output[8] =       ((limb) ((s32) in2[4])) * ((s32) in[4]) +
-               2 * (((limb) ((s32) in2[3])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[1])) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[0]);
-  output[9] =       ((limb) ((s32) in2[4])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[0]);
-  output[10] = 2 * (((limb) ((s32) in2[5])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[1])) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[2]);
-  output[11] =      ((limb) ((s32) in2[5])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[2]);
-  output[12] =      ((limb) ((s32) in2[6])) * ((s32) in[6]) +
-               2 * (((limb) ((s32) in2[5])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[3])) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[4]);
-  output[13] =      ((limb) ((s32) in2[6])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[4]);
-  output[14] = 2 * (((limb) ((s32) in2[7])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[5])) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[6]);
-  output[15] =      ((limb) ((s32) in2[7])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[6]);
-  output[16] =      ((limb) ((s32) in2[8])) * ((s32) in[8]) +
-               2 * (((limb) ((s32) in2[7])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[7]));
-  output[17] =      ((limb) ((s32) in2[8])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[8]);
-  output[18] = 2 *  ((limb) ((s32) in2[9])) * ((s32) in[9]);
-}
-
-/* Reduce a long form to a short form by taking the input mod 2^255 - 19. */
-static void freduce_degree(limb *output) {
-  /* Each of these shifts and adds ends up multiplying the value by 19. */
-  output[8] += output[18] << 4;
-  output[8] += output[18] << 1;
-  output[8] += output[18];
-  output[7] += output[17] << 4;
-  output[7] += output[17] << 1;
-  output[7] += output[17];
-  output[6] += output[16] << 4;
-  output[6] += output[16] << 1;
-  output[6] += output[16];
-  output[5] += output[15] << 4;
-  output[5] += output[15] << 1;
-  output[5] += output[15];
-  output[4] += output[14] << 4;
-  output[4] += output[14] << 1;
-  output[4] += output[14];
-  output[3] += output[13] << 4;
-  output[3] += output[13] << 1;
-  output[3] += output[13];
-  output[2] += output[12] << 4;
-  output[2] += output[12] << 1;
-  output[2] += output[12];
-  output[1] += output[11] << 4;
-  output[1] += output[11] << 1;
-  output[1] += output[11];
-  output[0] += output[10] << 4;
-  output[0] += output[10] << 1;
-  output[0] += output[10];
-}
-
-/* Reduce all coefficients of the short form input so that |x| < 2^26.
- *
- * On entry: |output[i]| < 2^62
- */
-static void freduce_coefficients(limb *output) {
-  unsigned i;
-  do {
-    output[10] = 0;
-
-    for (i = 0; i < 10; i += 2) {
-      limb over = output[i] / 0x4000000l;
-      output[i+1] += over;
-      output[i] -= over * 0x4000000l;
-
-      over = output[i+1] / 0x2000000;
-      output[i+2] += over;
-      output[i+1] -= over * 0x2000000;
-    }
-    output[0] += 19 * output[10];
-  } while (output[10]);
-}
-
-/* A helpful wrapper around fproduct: output = in * in2.
- *
- * output must be distinct to both inputs. The output is reduced degree and
- * reduced coefficient.
- */
-static void
-fmul(limb *output, const limb *in, const limb *in2) {
-  limb t[19];
-  fproduct(t, in, in2);
-  freduce_degree(t);
-  freduce_coefficients(t);
-  memcpy(output, t, sizeof(limb) * 10);
-}
-
-static void fsquare_inner(limb *output, const limb *in) {
-  output[0] =       ((limb) ((s32) in[0])) * ((s32) in[0]);
-  output[1] =  2 *  ((limb) ((s32) in[0])) * ((s32) in[1]);
-  output[2] =  2 * (((limb) ((s32) in[1])) * ((s32) in[1]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[2]));
-  output[3] =  2 * (((limb) ((s32) in[1])) * ((s32) in[2]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[3]));
-  output[4] =       ((limb) ((s32) in[2])) * ((s32) in[2]) +
-               4 *  ((limb) ((s32) in[1])) * ((s32) in[3]) +
-               2 *  ((limb) ((s32) in[0])) * ((s32) in[4]);
-  output[5] =  2 * (((limb) ((s32) in[2])) * ((s32) in[3]) +
-                    ((limb) ((s32) in[1])) * ((s32) in[4]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[5]));
-  output[6] =  2 * (((limb) ((s32) in[3])) * ((s32) in[3]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[4]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[6]) +
-               2 *  ((limb) ((s32) in[1])) * ((s32) in[5]));
-  output[7] =  2 * (((limb) ((s32) in[3])) * ((s32) in[4]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[5]) +
-                    ((limb) ((s32) in[1])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[7]));
-  output[8] =       ((limb) ((s32) in[4])) * ((s32) in[4]) +
-               2 * (((limb) ((s32) in[2])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[8]) +
-               2 * (((limb) ((s32) in[1])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[3])) * ((s32) in[5])));
-  output[9] =  2 * (((limb) ((s32) in[4])) * ((s32) in[5]) +
-                    ((limb) ((s32) in[3])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[1])) * ((s32) in[8]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[9]));
-  output[10] = 2 * (((limb) ((s32) in[5])) * ((s32) in[5]) +
-                    ((limb) ((s32) in[4])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[8]) +
-               2 * (((limb) ((s32) in[3])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[1])) * ((s32) in[9])));
-  output[11] = 2 * (((limb) ((s32) in[5])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[4])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[3])) * ((s32) in[8]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[9]));
-  output[12] =      ((limb) ((s32) in[6])) * ((s32) in[6]) +
-               2 * (((limb) ((s32) in[4])) * ((s32) in[8]) +
-               2 * (((limb) ((s32) in[5])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[3])) * ((s32) in[9])));
-  output[13] = 2 * (((limb) ((s32) in[6])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[5])) * ((s32) in[8]) +
-                    ((limb) ((s32) in[4])) * ((s32) in[9]));
-  output[14] = 2 * (((limb) ((s32) in[7])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[6])) * ((s32) in[8]) +
-               2 *  ((limb) ((s32) in[5])) * ((s32) in[9]));
-  output[15] = 2 * (((limb) ((s32) in[7])) * ((s32) in[8]) +
-                    ((limb) ((s32) in[6])) * ((s32) in[9]));
-  output[16] =      ((limb) ((s32) in[8])) * ((s32) in[8]) +
-               4 *  ((limb) ((s32) in[7])) * ((s32) in[9]);
-  output[17] = 2 *  ((limb) ((s32) in[8])) * ((s32) in[9]);
-  output[18] = 2 *  ((limb) ((s32) in[9])) * ((s32) in[9]);
-}
-
-static void
-fsquare(limb *output, const limb *in) {
-  limb t[19];
-  fsquare_inner(t, in);
-  freduce_degree(t);
-  freduce_coefficients(t);
-  memcpy(output, t, sizeof(limb) * 10);
-}
-
-/* Take a little-endian, 32-byte number and expand it into polynomial form */
-static void
-fexpand(limb *output, const u8 *input) {
-#define F(n,start,shift,mask) \
-  output[n] = ((((limb) input[start + 0]) | \
-                ((limb) input[start + 1]) << 8 | \
-                ((limb) input[start + 2]) << 16 | \
-                ((limb) input[start + 3]) << 24) >> shift) & mask;
-  F(0, 0, 0, 0x3ffffff);
-  F(1, 3, 2, 0x1ffffff);
-  F(2, 6, 3, 0x3ffffff);
-  F(3, 9, 5, 0x1ffffff);
-  F(4, 12, 6, 0x3ffffff);
-  F(5, 16, 0, 0x1ffffff);
-  F(6, 19, 1, 0x3ffffff);
-  F(7, 22, 3, 0x1ffffff);
-  F(8, 25, 4, 0x3ffffff);
-  F(9, 28, 6, 0x1ffffff);
-#undef F
-}
-
-/* Take a fully reduced polynomial form number and contract it into a
- * little-endian, 32-byte array
- */
-static void
-fcontract(u8 *output, limb *input) {
-  int i;
-
-  do {
-    for (i = 0; i < 9; ++i) {
-      if ((i & 1) == 1) {
-        while (input[i] < 0) {
-          input[i] += 0x2000000;
-          input[i + 1]--;
-        }
-      } else {
-        while (input[i] < 0) {
-          input[i] += 0x4000000;
-          input[i + 1]--;
-        }
-      }
-    }
-    while (input[9] < 0) {
-      input[9] += 0x2000000;
-      input[0] -= 19;
-    }
-  } while (input[0] < 0);
-
-  input[1] <<= 2;
-  input[2] <<= 3;
-  input[3] <<= 5;
-  input[4] <<= 6;
-  input[6] <<= 1;
-  input[7] <<= 3;
-  input[8] <<= 4;
-  input[9] <<= 6;
-#define F(i, s) \
-  output[s+0] |=  input[i] & 0xff; \
-  output[s+1]  = (input[i] >> 8) & 0xff; \
-  output[s+2]  = (input[i] >> 16) & 0xff; \
-  output[s+3]  = (input[i] >> 24) & 0xff;
-  output[0] = 0;
-  output[16] = 0;
-  F(0,0);
-  F(1,3);
-  F(2,6);
-  F(3,9);
-  F(4,12);
-  F(5,16);
-  F(6,19);
-  F(7,22);
-  F(8,25);
-  F(9,28);
-#undef F
-}
-
-/* Input: Q, Q', Q-Q'
- * Output: 2Q, Q+Q'
- *
- *   x2 z3: long form
- *   x3 z3: long form
- *   x z: short form, destroyed
- *   xprime zprime: short form, destroyed
- *   qmqp: short form, preserved
- */
-static void fmonty(limb *x2, limb *z2,  /* output 2Q */
-                   limb *x3, limb *z3,  /* output Q + Q' */
-                   limb *x, limb *z,    /* input Q */
-                   limb *xprime, limb *zprime,  /* input Q' */
-                   const limb *qmqp /* input Q - Q' */) {
-  limb origx[10], origxprime[10], zzz[19], xx[19], zz[19], xxprime[19],
-        zzprime[19], zzzprime[19], xxxprime[19];
-
-  memcpy(origx, x, 10 * sizeof(limb));
-  fsum(x, z);
-  fdifference(z, origx);  // does x - z
-
-  memcpy(origxprime, xprime, sizeof(limb) * 10);
-  fsum(xprime, zprime);
-  fdifference(zprime, origxprime);
-  fproduct(xxprime, xprime, z);
-  fproduct(zzprime, x, zprime);
-  freduce_degree(xxprime);
-  freduce_coefficients(xxprime);
-  freduce_degree(zzprime);
-  freduce_coefficients(zzprime);
-  memcpy(origxprime, xxprime, sizeof(limb) * 10);
-  fsum(xxprime, zzprime);
-  fdifference(zzprime, origxprime);
-  fsquare(xxxprime, xxprime);
-  fsquare(zzzprime, zzprime);
-  fproduct(zzprime, zzzprime, qmqp);
-  freduce_degree(zzprime);
-  freduce_coefficients(zzprime);
-  memcpy(x3, xxxprime, sizeof(limb) * 10);
-  memcpy(z3, zzprime, sizeof(limb) * 10);
-
-  fsquare(xx, x);
-  fsquare(zz, z);
-  fproduct(x2, xx, zz);
-  freduce_degree(x2);
-  freduce_coefficients(x2);
-  fdifference(zz, xx);  // does zz = xx - zz
-  memset(zzz + 10, 0, sizeof(limb) * 9);
-  fscalar_product(zzz, zz, 121665);
-  freduce_degree(zzz);
-  freduce_coefficients(zzz);
-  fsum(zzz, xx);
-  fproduct(z2, zz, zzz);
-  freduce_degree(z2);
-  freduce_coefficients(z2);
-}
-
-/* Calculates nQ where Q is the x-coordinate of a point on the curve
- *
- *   resultx/resultz: the x coordinate of the resulting curve point (short form)
- *   n: a little endian, 32-byte number
- *   q: a point of the curve (short form)
- */
-static void
-cmult(limb *resultx, limb *resultz, const u8 *n, const limb *q) {
-  limb a[19] = {0}, b[19] = {1}, c[19] = {1}, d[19] = {0};
-  limb *nqpqx = a, *nqpqz = b, *nqx = c, *nqz = d, *t;
-  limb e[19] = {0}, f[19] = {1}, g[19] = {0}, h[19] = {1};
-  limb *nqpqx2 = e, *nqpqz2 = f, *nqx2 = g, *nqz2 = h;
-
-  unsigned i, j;
-
-  memcpy(nqpqx, q, sizeof(limb) * 10);
-
-  for (i = 0; i < 32; ++i) {
-    u8 byte = n[31 - i];
-    for (j = 0; j < 8; ++j) {
-      if (byte & 0x80) {
-        fmonty(nqpqx2, nqpqz2,
-               nqx2, nqz2,
-               nqpqx, nqpqz,
-               nqx, nqz,
-               q);
-      } else {
-        fmonty(nqx2, nqz2,
-               nqpqx2, nqpqz2,
-               nqx, nqz,
-               nqpqx, nqpqz,
-               q);
-      }
-
-      t = nqx;
-      nqx = nqx2;
-      nqx2 = t;
-      t = nqz;
-      nqz = nqz2;
-      nqz2 = t;
-      t = nqpqx;
-      nqpqx = nqpqx2;
-      nqpqx2 = t;
-      t = nqpqz;
-      nqpqz = nqpqz2;
-      nqpqz2 = t;
-
-      byte <<= 1;
-    }
-  }
-
-  memcpy(resultx, nqx, sizeof(limb) * 10);
-  memcpy(resultz, nqz, sizeof(limb) * 10);
-}
-
-// -----------------------------------------------------------------------------
-// Shamelessly copied from djb's code
-// -----------------------------------------------------------------------------
-static void
-crecip(limb *out, const limb *z) {
-  limb z2[10];
-  limb z9[10];
-  limb z11[10];
-  limb z2_5_0[10];
-  limb z2_10_0[10];
-  limb z2_20_0[10];
-  limb z2_50_0[10];
-  limb z2_100_0[10];
-  limb t0[10];
-  limb t1[10];
-  int i;
-
-  /* 2 */ fsquare(z2,z);
-  /* 4 */ fsquare(t1,z2);
-  /* 8 */ fsquare(t0,t1);
-  /* 9 */ fmul(z9,t0,z);
-  /* 11 */ fmul(z11,z9,z2);
-  /* 22 */ fsquare(t0,z11);
-  /* 2^5 - 2^0 = 31 */ fmul(z2_5_0,t0,z9);
-
-  /* 2^6 - 2^1 */ fsquare(t0,z2_5_0);
-  /* 2^7 - 2^2 */ fsquare(t1,t0);
-  /* 2^8 - 2^3 */ fsquare(t0,t1);
-  /* 2^9 - 2^4 */ fsquare(t1,t0);
-  /* 2^10 - 2^5 */ fsquare(t0,t1);
-  /* 2^10 - 2^0 */ fmul(z2_10_0,t0,z2_5_0);
-
-  /* 2^11 - 2^1 */ fsquare(t0,z2_10_0);
-  /* 2^12 - 2^2 */ fsquare(t1,t0);
-  /* 2^20 - 2^10 */
-  for (i = 2;i < 10;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
-  /* 2^20 - 2^0 */ fmul(z2_20_0,t1,z2_10_0);
-
-  /* 2^21 - 2^1 */ fsquare(t0,z2_20_0);
-  /* 2^22 - 2^2 */ fsquare(t1,t0);
-  /* 2^40 - 2^20 */
-  for (i = 2;i < 20;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
-  /* 2^40 - 2^0 */ fmul(t0,t1,z2_20_0);
-
-  /* 2^41 - 2^1 */ fsquare(t1,t0);
-  /* 2^42 - 2^2 */ fsquare(t0,t1);
-  /* 2^50 - 2^10 */
-  for (i = 2;i < 10;i += 2) { fsquare(t1,t0); fsquare(t0,t1); }
-  /* 2^50 - 2^0 */ fmul(z2_50_0,t0,z2_10_0);
-
-  /* 2^51 - 2^1 */ fsquare(t0,z2_50_0);
-  /* 2^52 - 2^2 */ fsquare(t1,t0);
-  /* 2^100 - 2^50 */
-  for (i = 2;i < 50;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
-  /* 2^100 - 2^0 */ fmul(z2_100_0,t1,z2_50_0);
-
-  /* 2^101 - 2^1 */ fsquare(t1,z2_100_0);
-  /* 2^102 - 2^2 */ fsquare(t0,t1);
-  /* 2^200 - 2^100 */
-  for (i = 2;i < 100;i += 2) { fsquare(t1,t0); fsquare(t0,t1); }
-  /* 2^200 - 2^0 */ fmul(t1,t0,z2_100_0);
-
-  /* 2^201 - 2^1 */ fsquare(t0,t1);
-  /* 2^202 - 2^2 */ fsquare(t1,t0);
-  /* 2^250 - 2^50 */
-  for (i = 2;i < 50;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
-  /* 2^250 - 2^0 */ fmul(t0,t1,z2_50_0);
-
-  /* 2^251 - 2^1 */ fsquare(t1,t0);
-  /* 2^252 - 2^2 */ fsquare(t0,t1);
-  /* 2^253 - 2^3 */ fsquare(t1,t0);
-  /* 2^254 - 2^4 */ fsquare(t0,t1);
-  /* 2^255 - 2^5 */ fsquare(t1,t0);
-  /* 2^255 - 21 */ fmul(out,t1,z11);
-}
-
-int
-curve25519_donna(u8 *mypublic, const u8 *secret, const u8 *basepoint) {
-  limb bp[10], x[10], z[10], zmone[10];
-  uint8_t e[32];
-  int i;
-
-  for (i = 0; i < 32; ++i) e[i] = secret[i];
-  e[0] &= 248;
-  e[31] &= 127;
-  e[31] |= 64;
-
-  fexpand(bp, basepoint);
-  cmult(x, z, e, bp);
-  crecip(zmone, z);
-  fmul(z, x, zmone);
-  fcontract(mypublic, z);
-  return 0;
-}
diff --git a/crypto/ec_private_key.h b/crypto/ec_private_key.h
index 9a8a02a..a24219b 100644
--- a/crypto/ec_private_key.h
+++ b/crypto/ec_private_key.h
@@ -8,6 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -37,16 +38,25 @@
  public:
   ~ECPrivateKey();
 
-  // Creates a new random instance. Can return NULL if initialization fails.
+  // Creates a new random instance. Can return nullptr if initialization fails.
   // The created key will use the NIST P-256 curve.
   // TODO(mattm): Add a curve parameter.
-  static ECPrivateKey* Create();
+  static std::unique_ptr<ECPrivateKey> Create();
+
+  // Create a new instance by importing an existing private key. The format is
+  // an ASN.1-encoded PrivateKeyInfo block from PKCS #8. This can return
+  // nullptr if initialization fails.
+  static std::unique_ptr<ECPrivateKey> CreateFromPrivateKeyInfo(
+      const std::vector<uint8_t>& input);
 
   // Creates a new instance by importing an existing key pair.
   // The key pair is given as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
   // block and an X.509 SubjectPublicKeyInfo block.
-  // Returns NULL if initialization fails.
-  static ECPrivateKey* CreateFromEncryptedPrivateKeyInfo(
+  // Returns nullptr if initialization fails.
+  //
+  // This function is deprecated. Use CreateFromPrivateKeyInfo for new code.
+  // See https://crbug.com/603319.
+  static std::unique_ptr<ECPrivateKey> CreateFromEncryptedPrivateKeyInfo(
       const std::string& password,
       const std::vector<uint8_t>& encrypted_private_key_info,
       const std::vector<uint8_t>& subject_public_key_info);
@@ -69,7 +79,7 @@
 #endif
 
   // Returns a copy of the object.
-  ECPrivateKey* Copy() const;
+  std::unique_ptr<ECPrivateKey> Copy() const;
 
 #if defined(USE_OPENSSL)
   EVP_PKEY* key() { return key_; }
@@ -78,25 +88,26 @@
   SECKEYPublicKey* public_key() { return public_key_; }
 #endif
 
+  // Exports the private key to a PKCS #8 PrivateKeyInfo block.
+  bool ExportPrivateKey(std::vector<uint8_t>* output) const;
+
   // Exports the private key as an ASN.1-encoded PKCS #8 EncryptedPrivateKeyInfo
   // block and the public key as an X.509 SubjectPublicKeyInfo block.
   // The |password| and |iterations| are used as inputs to the key derivation
   // function for generating the encryption key.  PKCS #5 recommends a minimum
   // of 1000 iterations, on modern systems a larger value may be preferrable.
+  //
+  // This function is deprecated. Use ExportPrivateKey for new code. See
+  // https://crbug.com/603319.
   bool ExportEncryptedPrivateKey(const std::string& password,
                                  int iterations,
-                                 std::vector<uint8_t>* output);
+                                 std::vector<uint8_t>* output) const;
 
   // Exports the public key to an X.509 SubjectPublicKeyInfo block.
-  bool ExportPublicKey(std::vector<uint8_t>* output);
+  bool ExportPublicKey(std::vector<uint8_t>* output) const;
 
   // Exports the public key as an EC point in the uncompressed point format.
-  bool ExportRawPublicKey(std::string* output);
-
-  // Exports private key data for testing. The format of data stored into output
-  // doesn't matter other than that it is consistent for the same key.
-  bool ExportValue(std::vector<uint8_t>* output);
-  bool ExportECParams(std::vector<uint8_t>* output);
+  bool ExportRawPublicKey(std::string* output) const;
 
  private:
   // Constructor is private. Use one of the Create*() methods above instead.
diff --git a/crypto/hmac.cc b/crypto/hmac.cc
index e9869b4..af5580b 100644
--- a/crypto/hmac.cc
+++ b/crypto/hmac.cc
@@ -47,7 +47,7 @@
   if (digest.empty())
     return false;
   size_t digest_length = DigestLength();
-  scoped_ptr<unsigned char[]> computed_digest(
+  std::unique_ptr<unsigned char[]> computed_digest(
       new unsigned char[digest_length]);
   if (!Sign(data, computed_digest.get(), digest_length))
     return false;
diff --git a/crypto/hmac.h b/crypto/hmac.h
index ccdab30..ec32ed7 100644
--- a/crypto/hmac.h
+++ b/crypto/hmac.h
@@ -10,9 +10,10 @@
 
 #include <stddef.h>
 
+#include <memory>
+
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 #include "crypto/crypto_export.h"
 
@@ -85,7 +86,7 @@
 
  private:
   HashAlgorithm hash_alg_;
-  scoped_ptr<HMACPlatformData> plat_;
+  std::unique_ptr<HMACPlatformData> plat_;
 
   DISALLOW_COPY_AND_ASSIGN(HMAC);
 };
diff --git a/crypto/hmac_openssl.cc b/crypto/hmac_openssl.cc
deleted file mode 100644
index 8c8c11a..0000000
--- a/crypto/hmac_openssl.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/hmac.h"
-
-#include <openssl/hmac.h>
-#include <stddef.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/stl_util.h"
-#include "crypto/openssl_util.h"
-
-namespace crypto {
-
-struct HMACPlatformData {
-  std::vector<unsigned char> key;
-};
-
-HMAC::HMAC(HashAlgorithm hash_alg) : hash_alg_(hash_alg) {
-  // Only SHA-1 and SHA-256 hash algorithms are supported now.
-  DCHECK(hash_alg_ == SHA1 || hash_alg_ == SHA256);
-}
-
-bool HMAC::Init(const unsigned char* key, size_t key_length) {
-  // Init must not be called more than once on the same HMAC object.
-  DCHECK(!plat_);
-  plat_.reset(new HMACPlatformData());
-  plat_->key.assign(key, key + key_length);
-  return true;
-}
-
-HMAC::~HMAC() {
-  if (plat_) {
-    // Zero out key copy.
-    plat_->key.assign(plat_->key.size(), 0);
-    STLClearObject(&plat_->key);
-  }
-}
-
-bool HMAC::Sign(const base::StringPiece& data,
-                unsigned char* digest,
-                size_t digest_length) const {
-  DCHECK(plat_);  // Init must be called before Sign.
-
-  ScopedOpenSSLSafeSizeBuffer<EVP_MAX_MD_SIZE> result(digest, digest_length);
-  return !!::HMAC(hash_alg_ == SHA1 ? EVP_sha1() : EVP_sha256(),
-                  plat_->key.data(), plat_->key.size(),
-                  reinterpret_cast<const unsigned char*>(data.data()),
-                  data.size(), result.safe_buffer(), NULL);
-}
-
-}  // namespace crypto
diff --git a/crypto/hmac_unittest.cc b/crypto/hmac_unittest.cc
index f8dbd5a..9c42dad 100644
--- a/crypto/hmac_unittest.cc
+++ b/crypto/hmac_unittest.cc
@@ -287,7 +287,7 @@
   base::StringPiece data("");
 
   crypto::HMAC hmac(crypto::HMAC::SHA1);
-  ASSERT_TRUE(hmac.Init(NULL, 0));
+  ASSERT_TRUE(hmac.Init(nullptr, 0));
 
   unsigned char digest[kSHA1DigestSize];
   EXPECT_TRUE(hmac.Sign(data, digest, kSHA1DigestSize));
diff --git a/crypto/hmac_win.cc b/crypto/hmac_win.cc
deleted file mode 100644
index ab29081..0000000
--- a/crypto/hmac_win.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/hmac.h"
-
-#include <windows.h>
-#include <stddef.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "base/logging.h"
-#include "crypto/scoped_capi_types.h"
-#include "crypto/third_party/nss/chromium-blapi.h"
-#include "crypto/third_party/nss/chromium-sha256.h"
-#include "crypto/wincrypt_shim.h"
-
-namespace crypto {
-
-namespace {
-
-// Implementation of HMAC-SHA-256:
-//
-// SHA-256 is supported in Windows XP SP3 or later.  We still need to support
-// Windows XP SP2, so unfortunately we have to implement HMAC-SHA-256 here.
-
-enum {
-  SHA256_BLOCK_SIZE = 64  // Block size (in bytes) of the input to SHA-256.
-};
-
-// NSS doesn't accept size_t for text size, divide the data into smaller
-// chunks as needed.
-void Wrapped_SHA256_Update(SHA256Context* ctx, const unsigned char* text,
-                           size_t text_len) {
-  const unsigned int kChunkSize = 1 << 30;
-  while (text_len > kChunkSize) {
-    SHA256_Update(ctx, text, kChunkSize);
-    text += kChunkSize;
-    text_len -= kChunkSize;
-  }
-  SHA256_Update(ctx, text, (unsigned int)text_len);
-}
-
-// See FIPS 198: The Keyed-Hash Message Authentication Code (HMAC).
-void ComputeHMACSHA256(const unsigned char* key, size_t key_len,
-                       const unsigned char* text, size_t text_len,
-                       unsigned char* output, size_t output_len) {
-  SHA256Context ctx;
-
-  // Pre-process the key, if necessary.
-  unsigned char key0[SHA256_BLOCK_SIZE];
-  if (key_len > SHA256_BLOCK_SIZE) {
-    SHA256_Begin(&ctx);
-    Wrapped_SHA256_Update(&ctx, key, key_len);
-    SHA256_End(&ctx, key0, NULL, SHA256_LENGTH);
-    memset(key0 + SHA256_LENGTH, 0, SHA256_BLOCK_SIZE - SHA256_LENGTH);
-  } else {
-    memcpy(key0, key, key_len);
-    if (key_len < SHA256_BLOCK_SIZE)
-      memset(key0 + key_len, 0, SHA256_BLOCK_SIZE - key_len);
-  }
-
-  unsigned char padded_key[SHA256_BLOCK_SIZE];
-  unsigned char inner_hash[SHA256_LENGTH];
-
-  // XOR key0 with ipad.
-  for (int i = 0; i < SHA256_BLOCK_SIZE; ++i)
-    padded_key[i] = key0[i] ^ 0x36;
-
-  // Compute the inner hash.
-  SHA256_Begin(&ctx);
-  SHA256_Update(&ctx, padded_key, SHA256_BLOCK_SIZE);
-  Wrapped_SHA256_Update(&ctx, text, text_len);
-  SHA256_End(&ctx, inner_hash, NULL, SHA256_LENGTH);
-
-  // XOR key0 with opad.
-  for (int i = 0; i < SHA256_BLOCK_SIZE; ++i)
-    padded_key[i] = key0[i] ^ 0x5c;
-
-  // Compute the outer hash.
-  SHA256_Begin(&ctx);
-  SHA256_Update(&ctx, padded_key, SHA256_BLOCK_SIZE);
-  SHA256_Update(&ctx, inner_hash, SHA256_LENGTH);
-  SHA256_End(&ctx, output, NULL, (unsigned int) output_len);
-}
-
-}  // namespace
-
-struct HMACPlatformData {
-  ~HMACPlatformData() {
-    if (!raw_key_.empty()) {
-      SecureZeroMemory(&raw_key_[0], raw_key_.size());
-    }
-
-    // Destroy the key before releasing the provider.
-    key_.reset();
-  }
-
-  ScopedHCRYPTPROV provider_;
-  ScopedHCRYPTKEY key_;
-
-  // For HMAC-SHA-256 only.
-  std::vector<unsigned char> raw_key_;
-};
-
-HMAC::HMAC(HashAlgorithm hash_alg)
-    : hash_alg_(hash_alg), plat_(new HMACPlatformData()) {
-  // Only SHA-1 and SHA-256 hash algorithms are supported now.
-  DCHECK(hash_alg_ == SHA1 || hash_alg_ == SHA256);
-}
-
-bool HMAC::Init(const unsigned char* key, size_t key_length) {
-  if (plat_->provider_ || plat_->key_ || !plat_->raw_key_.empty()) {
-    // Init must not be called more than once on the same HMAC object.
-    NOTREACHED();
-    return false;
-  }
-
-  if (hash_alg_ == SHA256) {
-    plat_->raw_key_.assign(key, key + key_length);
-    return true;
-  }
-
-  if (!CryptAcquireContext(plat_->provider_.receive(), NULL, NULL,
-                           PROV_RSA_FULL, CRYPT_VERIFYCONTEXT)) {
-    NOTREACHED();
-    return false;
-  }
-
-  // This code doesn't work on Win2k because PLAINTEXTKEYBLOB and
-  // CRYPT_IPSEC_HMAC_KEY are not supported on Windows 2000.  PLAINTEXTKEYBLOB
-  // allows the import of an unencrypted key.  For Win2k support, a cubmbersome
-  // exponent-of-one key procedure must be used:
-  //     http://support.microsoft.com/kb/228786/en-us
-  // CRYPT_IPSEC_HMAC_KEY allows keys longer than 16 bytes.
-
-  struct KeyBlob {
-    BLOBHEADER header;
-    DWORD key_size;
-    BYTE key_data[1];
-  };
-  size_t key_blob_size = std::max(offsetof(KeyBlob, key_data) + key_length,
-                                  sizeof(KeyBlob));
-  std::vector<BYTE> key_blob_storage = std::vector<BYTE>(key_blob_size);
-  KeyBlob* key_blob = reinterpret_cast<KeyBlob*>(&key_blob_storage[0]);
-  key_blob->header.bType = PLAINTEXTKEYBLOB;
-  key_blob->header.bVersion = CUR_BLOB_VERSION;
-  key_blob->header.reserved = 0;
-  key_blob->header.aiKeyAlg = CALG_RC2;
-  key_blob->key_size = static_cast<DWORD>(key_length);
-  memcpy(key_blob->key_data, key, key_length);
-
-  if (!CryptImportKey(plat_->provider_, &key_blob_storage[0],
-                      (DWORD)key_blob_storage.size(), 0,
-                      CRYPT_IPSEC_HMAC_KEY, plat_->key_.receive())) {
-    NOTREACHED();
-    return false;
-  }
-
-  // Destroy the copy of the key.
-  SecureZeroMemory(key_blob->key_data, key_length);
-
-  return true;
-}
-
-HMAC::~HMAC() {
-}
-
-bool HMAC::Sign(const base::StringPiece& data,
-                unsigned char* digest,
-                size_t digest_length) const {
-  if (hash_alg_ == SHA256) {
-    if (plat_->raw_key_.empty())
-      return false;
-    ComputeHMACSHA256(&plat_->raw_key_[0], plat_->raw_key_.size(),
-                      reinterpret_cast<const unsigned char*>(data.data()),
-                      data.size(), digest, digest_length);
-    return true;
-  }
-
-  if (!plat_->provider_ || !plat_->key_)
-    return false;
-
-  if (hash_alg_ != SHA1) {
-    NOTREACHED();
-    return false;
-  }
-
-  ScopedHCRYPTHASH hash;
-  if (!CryptCreateHash(plat_->provider_, CALG_HMAC, plat_->key_, 0,
-                       hash.receive()))
-    return false;
-
-  HMAC_INFO hmac_info;
-  memset(&hmac_info, 0, sizeof(hmac_info));
-  hmac_info.HashAlgid = CALG_SHA1;
-  if (!CryptSetHashParam(hash, HP_HMAC_INFO,
-                         reinterpret_cast<BYTE*>(&hmac_info), 0))
-    return false;
-
-  if (!CryptHashData(hash, reinterpret_cast<const BYTE*>(data.data()),
-                     static_cast<DWORD>(data.size()), 0))
-    return false;
-
-  DWORD sha1_size = static_cast<DWORD>(digest_length);
-  return !!CryptGetHashParam(hash, HP_HASHVAL, digest, &sha1_size, 0);
-}
-
-}  // namespace crypto
diff --git a/crypto/nss_key_util.cc b/crypto/nss_key_util.cc
index 3e03489..da8d9c3 100644
--- a/crypto/nss_key_util.cc
+++ b/crypto/nss_key_util.cc
@@ -7,29 +7,26 @@
 #include <cryptohi.h>
 #include <keyhi.h>
 #include <pk11pub.h>
+#include <secmod.h>
 #include <stdint.h>
 
+#include <memory>
+
 #include "base/logging.h"
 #include "crypto/nss_util.h"
-
-#if defined(USE_NSS_CERTS)
-#include <secmod.h>
 #include "crypto/nss_util_internal.h"
-#endif
 
 namespace crypto {
 
 namespace {
 
-#if defined(USE_NSS_CERTS)
-
 struct PublicKeyInfoDeleter {
   inline void operator()(CERTSubjectPublicKeyInfo* spki) {
     SECKEY_DestroySubjectPublicKeyInfo(spki);
   }
 };
 
-typedef scoped_ptr<CERTSubjectPublicKeyInfo, PublicKeyInfoDeleter>
+typedef std::unique_ptr<CERTSubjectPublicKeyInfo, PublicKeyInfoDeleter>
     ScopedPublicKeyInfo;
 
 // Decodes |input| as a SubjectPublicKeyInfo and returns a SECItem containing
@@ -57,8 +54,6 @@
   return ScopedSECItem(PK11_MakeIDFromPubKey(&result->u.rsa.modulus));
 }
 
-#endif  // defined(USE_NSS_CERTS)
-
 }  // namespace
 
 bool GenerateRSAKeyPairNSS(PK11SlotInfo* slot,
@@ -116,8 +111,6 @@
   return ScopedSECKEYPrivateKey(key_raw);
 }
 
-#if defined(USE_NSS_CERTS)
-
 ScopedSECKEYPrivateKey FindNSSKeyFromPublicKeyInfo(
     const std::vector<uint8_t>& input) {
   EnsureNSSInit();
@@ -158,6 +151,4 @@
       PK11_FindKeyByKeyID(slot, cka_id.get(), nullptr));
 }
 
-#endif  // defined(USE_NSS_CERTS)
-
 }  // namespace crypto
diff --git a/crypto/nss_key_util.h b/crypto/nss_key_util.h
index 12b948d..86934dd 100644
--- a/crypto/nss_key_util.h
+++ b/crypto/nss_key_util.h
@@ -36,8 +36,6 @@
                                const std::vector<uint8_t>& input,
                                bool permanent);
 
-#if defined(USE_NSS_CERTS)
-
 // Decodes |input| as a DER-encoded X.509 SubjectPublicKeyInfo and searches for
 // the private key half in the key database. Returns the private key on success
 // or nullptr on error.
@@ -51,8 +49,6 @@
 FindNSSKeyFromPublicKeyInfoInSlot(const std::vector<uint8_t>& input,
                                   PK11SlotInfo* slot);
 
-#endif  // defined(USE_NSS_CERTS)
-
 }  // namespace crypto
 
 #endif  // CRYPTO_NSS_KEY_UTIL_H_
diff --git a/crypto/nss_key_util_unittest.cc b/crypto/nss_key_util_unittest.cc
index 99b52a9..ced9850 100644
--- a/crypto/nss_key_util_unittest.cc
+++ b/crypto/nss_key_util_unittest.cc
@@ -46,7 +46,6 @@
             PK11_GetPrivateModulusLen(private_key.get()));
 }
 
-#if defined(USE_NSS_CERTS)
 TEST_F(NSSKeyUtilTest, FindNSSKeyFromPublicKeyInfo) {
   // Create an NSS keypair, which will put the keys in the user's NSSDB.
   ScopedSECKEYPublicKey public_key;
@@ -83,6 +82,5 @@
 
   EXPECT_FALSE(FindNSSKeyFromPublicKeyInfo(public_key_der));
 }
-#endif  // defined(USE_NSS_CERTS)
 
 }  // namespace crypto
diff --git a/crypto/nss_util.cc b/crypto/nss_util.cc
index cbc57dc..96ee060 100644
--- a/crypto/nss_util.cc
+++ b/crypto/nss_util.cc
@@ -11,6 +11,8 @@
 #include <prinit.h>
 #include <prtime.h>
 #include <secmod.h>
+
+#include <memory>
 #include <utility>
 
 #include "crypto/nss_util_internal.h"
@@ -36,7 +38,6 @@
 #include "base/files/file_util.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/native_library.h"
 #include "base/stl_util.h"
@@ -78,7 +79,7 @@
 std::string GetNSSErrorMessage() {
   std::string result;
   if (PR_GetErrorTextLength()) {
-    scoped_ptr<char[]> error_text(new char[PR_GetErrorTextLength() + 1]);
+    std::unique_ptr<char[]> error_text(new char[PR_GetErrorTextLength() + 1]);
     PRInt32 copied = PR_GetErrorText(error_text.get());
     result = std::string(error_text.get(), copied);
   } else {
@@ -170,7 +171,7 @@
 #endif
 
   if (db_on_nfs) {
-    scoped_ptr<base::Environment> env(base::Environment::Create());
+    std::unique_ptr<base::Environment> env(base::Environment::Create());
     static const char kUseCacheEnvVar[] = "NSS_SDB_USE_CACHE";
     if (!env->HasVar(kUseCacheEnvVar))
       env->SetVar(kUseCacheEnvVar, "yes");
@@ -375,7 +376,8 @@
 
     // Note that a reference is not taken to chaps_module_. This is safe since
     // NSSInitSingleton is Leaky, so the reference it holds is never released.
-    scoped_ptr<TPMModuleAndSlot> tpm_args(new TPMModuleAndSlot(chaps_module_));
+    std::unique_ptr<TPMModuleAndSlot> tpm_args(
+        new TPMModuleAndSlot(chaps_module_));
     TPMModuleAndSlot* tpm_args_ptr = tpm_args.get();
     if (base::WorkerPool::PostTaskAndReply(
             FROM_HERE,
@@ -421,7 +423,7 @@
 
   void OnInitializedTPMTokenAndSystemSlot(
       const base::Callback<void(bool)>& callback,
-      scoped_ptr<TPMModuleAndSlot> tpm_args) {
+      std::unique_ptr<TPMModuleAndSlot> tpm_args) {
     DCHECK(thread_checker_.CalledOnValidThread());
     DVLOG(2) << "Loaded chaps: " << !!tpm_args->chaps_module
              << ", got tpm slot: " << !!tpm_args->tpm_slot;
@@ -537,7 +539,8 @@
 
     // Note that a reference is not taken to chaps_module_. This is safe since
     // NSSInitSingleton is Leaky, so the reference it holds is never released.
-    scoped_ptr<TPMModuleAndSlot> tpm_args(new TPMModuleAndSlot(chaps_module_));
+    std::unique_ptr<TPMModuleAndSlot> tpm_args(
+        new TPMModuleAndSlot(chaps_module_));
     TPMModuleAndSlot* tpm_args_ptr = tpm_args.get();
     base::WorkerPool::PostTaskAndReply(
         FROM_HERE,
@@ -552,8 +555,9 @@
         );
   }
 
-  void OnInitializedTPMForChromeOSUser(const std::string& username_hash,
-                                       scoped_ptr<TPMModuleAndSlot> tpm_args) {
+  void OnInitializedTPMForChromeOSUser(
+      const std::string& username_hash,
+      std::unique_ptr<TPMModuleAndSlot> tpm_args) {
     DCHECK(thread_checker_.CalledOnValidThread());
     DVLOG(2) << "Got tpm slot for " << username_hash << " "
              << !!tpm_args->tpm_slot;
@@ -806,7 +810,6 @@
     }
   }
 
-#if defined(USE_NSS_CERTS) || defined(OS_IOS)
   // Load nss's built-in root certs.
   SECMODModule* InitDefaultRootCerts() {
     SECMODModule* root = LoadModule("Root Certs", "libnssckbi.so", NULL);
@@ -846,7 +849,6 @@
     }
     return module;
   }
-#endif
 
   bool tpm_token_enabled_for_nss_;
   bool initializing_tpm_token_;
diff --git a/crypto/nss_util.h b/crypto/nss_util.h
index 71e5a67..a8b57ff 100644
--- a/crypto/nss_util.h
+++ b/crypto/nss_util.h
@@ -24,12 +24,10 @@
 // initialization functions.
 namespace crypto {
 
-#if defined(USE_NSS_CERTS)
 // EarlySetupForNSSInit performs lightweight setup which must occur before the
 // process goes multithreaded. This does not initialise NSS. For test, see
 // EnsureNSSInit.
 CRYPTO_EXPORT void EarlySetupForNSSInit();
-#endif
 
 // Initialize NRPR if it isn't already initialized.  This function is
 // thread-safe, and NSPR will only ever be initialized once.
@@ -81,7 +79,6 @@
 // We use a int64_t instead of PRTime here to avoid depending on NSPR headers.
 CRYPTO_EXPORT int64_t BaseTimeToPRTime(base::Time time);
 
-#if defined(USE_NSS_CERTS)
 // NSS has a bug which can cause a deadlock or stall in some cases when writing
 // to the certDB and keyDB. It also has a bug which causes concurrent key pair
 // generations to scribble over each other. To work around this, we synchronize
@@ -102,7 +99,6 @@
   base::Lock *lock_;
   DISALLOW_COPY_AND_ASSIGN(AutoNSSWriteLock);
 };
-#endif  // defined(USE_NSS_CERTS)
 
 }  // namespace crypto
 
diff --git a/crypto/nss_util_internal.h b/crypto/nss_util_internal.h
index 0982a6e..697e376 100644
--- a/crypto/nss_util_internal.h
+++ b/crypto/nss_util_internal.h
@@ -24,7 +24,7 @@
 
 // Opens an NSS software database in folder |path|, with the (potentially)
 // user-visible description |description|. Returns the slot for the opened
-// database, or NULL if the database could not be opened.
+// database, or nullptr if the database could not be opened.
 CRYPTO_EXPORT ScopedPK11Slot OpenSoftwareNSSDB(const base::FilePath& path,
                                                const std::string& description);
 
@@ -57,8 +57,8 @@
 // through |GetSystemNSSKeySlot| and |IsTPMTokenReady| will return true.
 // |InitializeTPMTokenAndSystemSlot|, which triggers the TPM initialization,
 // does not have to be called if the test system slot is set.
-// This must must not be called consecutively with a |slot| != NULL. If |slot|
-// is NULL, the test system slot is unset.
+// This must not be called consecutively with a |slot| != nullptr. If
+// |slot| is nullptr, the test system slot is unset.
 CRYPTO_EXPORT void SetSystemKeySlotForTesting(ScopedPK11Slot slot);
 
 // Prepare per-user NSS slot mapping. It is safe to call this function multiple
diff --git a/crypto/nss_util_unittest.cc b/crypto/nss_util_unittest.cc
index 2859191..729d5bf 100644
--- a/crypto/nss_util_unittest.cc
+++ b/crypto/nss_util_unittest.cc
@@ -34,7 +34,8 @@
   prxtime.tm_usec = 342000;
 
   PRTime pr_time = PR_ImplodeTime(&prxtime);
-  base::Time base_time = base::Time::FromUTCExploded(exploded);
+  base::Time base_time;
+  EXPECT_TRUE(base::Time::FromUTCExploded(exploded, &base_time));
 
   EXPECT_EQ(base_time, PRTimeToBaseTime(pr_time));
   EXPECT_EQ(pr_time, BaseTimeToPRTime(base_time));
diff --git a/crypto/openssl_util.cc b/crypto/openssl_util.cc
index 48ec3e2..78c6cbb 100644
--- a/crypto/openssl_util.cc
+++ b/crypto/openssl_util.cc
@@ -4,67 +4,23 @@
 
 #include "crypto/openssl_util.h"
 
-#include <openssl/err.h>
-#include <openssl/ssl.h>
+#if defined(OPENSSL_IS_BORINGSSL)
 #include <openssl/cpu.h>
+#else
+#include <openssl/ssl.h>
+#endif
+#include <openssl/crypto.h>
+#include <openssl/err.h>
 #include <stddef.h>
 #include <stdint.h>
 
 #include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/singleton.h"
 #include "base/strings/string_piece.h"
-#include "build/build_config.h"
-
-#if defined(OS_ANDROID) && defined(ARCH_CPU_ARMEL)
-#include <cpu-features.h>
-#include "base/cpu.h"
-#endif
 
 namespace crypto {
 
 namespace {
 
-// Singleton for initializing and cleaning up the OpenSSL library.
-class OpenSSLInitSingleton {
- public:
-  static OpenSSLInitSingleton* GetInstance() {
-    // We allow the SSL environment to leak for multiple reasons:
-    //   -  it is used from a non-joinable worker thread that is not stopped on
-    //      shutdown, hence may still be using OpenSSL library after the AtExit
-    //      runner has completed.
-    //   -  There are other OpenSSL related singletons (e.g. the client socket
-    //      context) who's cleanup depends on the global environment here, but
-    //      we can't control the order the AtExit handlers will run in so
-    //      allowing the global environment to leak at least ensures it is
-    //      available for those other singletons to reliably cleanup.
-    return base::Singleton<
-        OpenSSLInitSingleton,
-        base::LeakySingletonTraits<OpenSSLInitSingleton>>::get();
-  }
- private:
-  friend struct base::DefaultSingletonTraits<OpenSSLInitSingleton>;
-  OpenSSLInitSingleton() {
-#if defined(OS_ANDROID) && defined(ARCH_CPU_ARMEL)
-    const bool has_neon =
-        (android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
-    // CRYPTO_set_NEON_capable is called before |SSL_library_init| because this
-    // stops BoringSSL from probing for NEON support via SIGILL in the case
-    // that getauxval isn't present.
-    CRYPTO_set_NEON_capable(has_neon);
-    // See https://code.google.com/p/chromium/issues/detail?id=341598
-    base::CPU cpu;
-    CRYPTO_set_NEON_functional(!cpu.has_broken_neon());
-#endif
-
-    SSL_library_init();
-  }
-
-  ~OpenSSLInitSingleton() {}
-
-  DISALLOW_COPY_AND_ASSIGN(OpenSSLInitSingleton);
-};
-
 // Callback routine for OpenSSL to print error messages. |str| is a
 // NULL-terminated string of length |len| containing diagnostic information
 // such as the library, function and reason for the error, the file and line
@@ -82,7 +38,12 @@
 }  // namespace
 
 void EnsureOpenSSLInit() {
-  (void)OpenSSLInitSingleton::GetInstance();
+#if defined(OPENSSL_IS_BORINGSSL)
+  // CRYPTO_library_init may be safely called concurrently.
+  CRYPTO_library_init();
+#else
+  SSL_library_init();
+#endif
 }
 
 void ClearOpenSSLERRStack(const tracked_objects::Location& location) {
diff --git a/crypto/openssl_util.h b/crypto/openssl_util.h
index 78fa66e..d608cde 100644
--- a/crypto/openssl_util.h
+++ b/crypto/openssl_util.h
@@ -58,12 +58,12 @@
 // multiple times.
 // This function is thread-safe, and OpenSSL will only ever be initialized once.
 // OpenSSL will be properly shut down on program exit.
-void CRYPTO_EXPORT EnsureOpenSSLInit();
+CRYPTO_EXPORT void EnsureOpenSSLInit();
 
 // Drains the OpenSSL ERR_get_error stack. On a debug build the error codes
 // are send to VLOG(1), on a release build they are disregarded. In most
 // cases you should pass FROM_HERE as the |location|.
-void CRYPTO_EXPORT ClearOpenSSLERRStack(
+CRYPTO_EXPORT void ClearOpenSSLERRStack(
     const tracked_objects::Location& location);
 
 // Place an instance of this class on the call stack to automatically clear
diff --git a/crypto/rsa_private_key.h b/crypto/rsa_private_key.h
index 9703334..d4808f5 100644
--- a/crypto/rsa_private_key.h
+++ b/crypto/rsa_private_key.h
@@ -200,7 +200,7 @@
   // Creates a copy of the object.
   RSAPrivateKey* Copy() const;
 
-  // Exports the private key to a PKCS #1 PrivateKey block.
+  // Exports the private key to a PKCS #8 PrivateKeyInfo block.
   bool ExportPrivateKey(std::vector<uint8_t>* output) const;
 
   // Exports the public key to an X509 SubjectPublicKeyInfo block.
diff --git a/crypto/rsa_private_key_openssl.cc b/crypto/rsa_private_key_openssl.cc
deleted file mode 100644
index f7fdd9d..0000000
--- a/crypto/rsa_private_key_openssl.cc
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/rsa_private_key.h"
-
-#include <openssl/bio.h>
-#include <openssl/bn.h>
-#include <openssl/evp.h>
-#include <openssl/pkcs12.h>
-#include <openssl/rsa.h>
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "crypto/openssl_util.h"
-#include "crypto/scoped_openssl_types.h"
-
-namespace crypto {
-
-namespace {
-
-using ScopedPKCS8_PRIV_KEY_INFO =
-    ScopedOpenSSL<PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO_free>;
-
-// Function pointer definition, for injecting the required key export function
-// into ExportKey, below. The supplied function should export EVP_PKEY into
-// the supplied BIO, returning 1 on success or 0 on failure.
-using ExportFunction = int (*)(BIO*, EVP_PKEY*);
-
-// Helper to export |key| into |output| via the specified ExportFunction.
-bool ExportKey(EVP_PKEY* key,
-               ExportFunction export_fn,
-               std::vector<uint8_t>* output) {
-  if (!key)
-    return false;
-
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  ScopedBIO bio(BIO_new(BIO_s_mem()));
-
-  int res = export_fn(bio.get(), key);
-  if (!res)
-    return false;
-
-  char* data = NULL;
-  long len = BIO_get_mem_data(bio.get(), &data);
-  if (!data || len < 0)
-    return false;
-
-  output->assign(data, data + len);
-  return true;
-}
-
-}  // namespace
-
-// static
-RSAPrivateKey* RSAPrivateKey::Create(uint16_t num_bits) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-
-  ScopedRSA rsa_key(RSA_new());
-  ScopedBIGNUM bn(BN_new());
-  if (!rsa_key.get() || !bn.get() || !BN_set_word(bn.get(), 65537L))
-    return NULL;
-
-  if (!RSA_generate_key_ex(rsa_key.get(), num_bits, bn.get(), NULL))
-    return NULL;
-
-  scoped_ptr<RSAPrivateKey> result(new RSAPrivateKey);
-  result->key_ = EVP_PKEY_new();
-  if (!result->key_ || !EVP_PKEY_set1_RSA(result->key_, rsa_key.get()))
-    return NULL;
-
-  return result.release();
-}
-
-// static
-RSAPrivateKey* RSAPrivateKey::CreateFromPrivateKeyInfo(
-    const std::vector<uint8_t>& input) {
-  if (input.empty())
-    return NULL;
-
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-
-  // Importing is a little more involved than exporting, as we must first
-  // PKCS#8 decode the input, and then import the EVP_PKEY from Private Key
-  // Info structure returned.
-  const uint8_t* ptr = &input[0];
-  ScopedPKCS8_PRIV_KEY_INFO p8inf(
-      d2i_PKCS8_PRIV_KEY_INFO(nullptr, &ptr, input.size()));
-  if (!p8inf.get() || ptr != &input[0] + input.size())
-    return NULL;
-
-  scoped_ptr<RSAPrivateKey> result(new RSAPrivateKey);
-  result->key_ = EVP_PKCS82PKEY(p8inf.get());
-  if (!result->key_ || EVP_PKEY_id(result->key_) != EVP_PKEY_RSA)
-    return NULL;
-
-  return result.release();
-}
-
-// static
-RSAPrivateKey* RSAPrivateKey::CreateFromKey(EVP_PKEY* key) {
-  DCHECK(key);
-  if (EVP_PKEY_type(key->type) != EVP_PKEY_RSA)
-    return NULL;
-  RSAPrivateKey* copy = new RSAPrivateKey();
-  copy->key_ = EVP_PKEY_up_ref(key);
-  return copy;
-}
-
-RSAPrivateKey::RSAPrivateKey()
-    : key_(NULL) {
-}
-
-RSAPrivateKey::~RSAPrivateKey() {
-  if (key_)
-    EVP_PKEY_free(key_);
-}
-
-RSAPrivateKey* RSAPrivateKey::Copy() const {
-  scoped_ptr<RSAPrivateKey> copy(new RSAPrivateKey());
-  ScopedRSA rsa(EVP_PKEY_get1_RSA(key_));
-  if (!rsa)
-    return NULL;
-  copy->key_ = EVP_PKEY_new();
-  if (!EVP_PKEY_set1_RSA(copy->key_, rsa.get()))
-    return NULL;
-  return copy.release();
-}
-
-bool RSAPrivateKey::ExportPrivateKey(std::vector<uint8_t>* output) const {
-  return ExportKey(key_, i2d_PKCS8PrivateKeyInfo_bio, output);
-}
-
-bool RSAPrivateKey::ExportPublicKey(std::vector<uint8_t>* output) const {
-  return ExportKey(key_, i2d_PUBKEY_bio, output);
-}
-
-}  // namespace crypto
diff --git a/crypto/rsa_private_key_unittest.cc b/crypto/rsa_private_key_unittest.cc
index 1401e3d..393a24c 100644
--- a/crypto/rsa_private_key_unittest.cc
+++ b/crypto/rsa_private_key_unittest.cc
@@ -6,7 +6,8 @@
 
 #include <stdint.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace {
@@ -71,9 +72,9 @@
 // Generate random private keys with two different sizes. Reimport, then
 // export them again. We should get back the same exact bytes.
 TEST(RSAPrivateKeyUnitTest, InitRandomTest) {
-  scoped_ptr<crypto::RSAPrivateKey> keypair1(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair1(
       crypto::RSAPrivateKey::Create(1024));
-  scoped_ptr<crypto::RSAPrivateKey> keypair2(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair2(
       crypto::RSAPrivateKey::Create(2048));
   ASSERT_TRUE(keypair1.get());
   ASSERT_TRUE(keypair2.get());
@@ -88,9 +89,9 @@
   ASSERT_TRUE(keypair1->ExportPublicKey(&pubkey1));
   ASSERT_TRUE(keypair2->ExportPublicKey(&pubkey2));
 
-  scoped_ptr<crypto::RSAPrivateKey> keypair3(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair3(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(privkey1));
-  scoped_ptr<crypto::RSAPrivateKey> keypair4(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair4(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(privkey2));
   ASSERT_TRUE(keypair3.get());
   ASSERT_TRUE(keypair4.get());
@@ -113,10 +114,10 @@
   std::vector<uint8_t> input(kTestPrivateKeyInfo,
                              kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
 
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
 
-  scoped_ptr<crypto::RSAPrivateKey> key_copy(key->Copy());
+  std::unique_ptr<crypto::RSAPrivateKey> key_copy(key->Copy());
   ASSERT_TRUE(key_copy.get());
 
   std::vector<uint8_t> privkey_copy;
@@ -131,7 +132,7 @@
                              kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
   input.push_back(0);
 
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
 
   // Import should fail.
@@ -158,7 +159,7 @@
       kTestEcPrivateKeyInfo,
       kTestEcPrivateKeyInfo + sizeof(kTestEcPrivateKeyInfo));
 
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
 
   // Import should fail as the given PKCS8 bytes were for an EC key not RSA key.
@@ -187,7 +188,7 @@
   std::vector<uint8_t> input(kTestPrivateKeyInfo,
                              kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
 
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
   ASSERT_TRUE(key.get());
 
@@ -334,9 +335,9 @@
   memcpy(&input2.front(), short_integer_without_high_bit,
          sizeof(short_integer_without_high_bit));
 
-  scoped_ptr<crypto::RSAPrivateKey> keypair1(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair1(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input1));
-  scoped_ptr<crypto::RSAPrivateKey> keypair2(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair2(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input2));
   ASSERT_TRUE(keypair1.get());
   ASSERT_TRUE(keypair2.get());
@@ -355,11 +356,11 @@
 }
 
 TEST(RSAPrivateKeyUnitTest, CreateFromKeyTest) {
-  scoped_ptr<crypto::RSAPrivateKey> key_pair(
+  std::unique_ptr<crypto::RSAPrivateKey> key_pair(
       crypto::RSAPrivateKey::Create(512));
   ASSERT_TRUE(key_pair.get());
 
-  scoped_ptr<crypto::RSAPrivateKey> key_copy(
+  std::unique_ptr<crypto::RSAPrivateKey> key_copy(
       crypto::RSAPrivateKey::CreateFromKey(key_pair->key()));
   ASSERT_TRUE(key_copy.get());
 
diff --git a/crypto/scoped_nss_types.h b/crypto/scoped_nss_types.h
index 8e96e8d..a739565 100644
--- a/crypto/scoped_nss_types.h
+++ b/crypto/scoped_nss_types.h
@@ -10,7 +10,7 @@
 #include <pk11pub.h>
 #include <plarena.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
 
 namespace crypto {
 
@@ -29,29 +29,33 @@
 };
 
 // Define some convenient scopers around NSS pointers.
-typedef scoped_ptr<PK11Context,
-                   NSSDestroyer1<PK11Context, PK11_DestroyContext, PR_TRUE> >
+typedef std::unique_ptr<
+    PK11Context,
+    NSSDestroyer1<PK11Context, PK11_DestroyContext, PR_TRUE>>
     ScopedPK11Context;
-typedef scoped_ptr<PK11SlotInfo, NSSDestroyer<PK11SlotInfo, PK11_FreeSlot> >
+typedef std::unique_ptr<PK11SlotInfo, NSSDestroyer<PK11SlotInfo, PK11_FreeSlot>>
     ScopedPK11Slot;
-typedef scoped_ptr<PK11SlotList, NSSDestroyer<PK11SlotList, PK11_FreeSlotList> >
+typedef std::unique_ptr<PK11SlotList,
+                        NSSDestroyer<PK11SlotList, PK11_FreeSlotList>>
     ScopedPK11SlotList;
-typedef scoped_ptr<PK11SymKey, NSSDestroyer<PK11SymKey, PK11_FreeSymKey> >
+typedef std::unique_ptr<PK11SymKey, NSSDestroyer<PK11SymKey, PK11_FreeSymKey>>
     ScopedPK11SymKey;
-typedef scoped_ptr<SECKEYPublicKey,
-                   NSSDestroyer<SECKEYPublicKey, SECKEY_DestroyPublicKey> >
+typedef std::unique_ptr<SECKEYPublicKey,
+                        NSSDestroyer<SECKEYPublicKey, SECKEY_DestroyPublicKey>>
     ScopedSECKEYPublicKey;
-typedef scoped_ptr<SECKEYPrivateKey,
-                   NSSDestroyer<SECKEYPrivateKey, SECKEY_DestroyPrivateKey> >
+typedef std::unique_ptr<
+    SECKEYPrivateKey,
+    NSSDestroyer<SECKEYPrivateKey, SECKEY_DestroyPrivateKey>>
     ScopedSECKEYPrivateKey;
-typedef scoped_ptr<SECAlgorithmID,
-                   NSSDestroyer1<SECAlgorithmID, SECOID_DestroyAlgorithmID,
-                                 PR_TRUE> >
+typedef std::unique_ptr<
+    SECAlgorithmID,
+    NSSDestroyer1<SECAlgorithmID, SECOID_DestroyAlgorithmID, PR_TRUE>>
     ScopedSECAlgorithmID;
-typedef scoped_ptr<SECItem, NSSDestroyer1<SECItem, SECITEM_FreeItem, PR_TRUE> >
+typedef std::unique_ptr<SECItem,
+                        NSSDestroyer1<SECItem, SECITEM_FreeItem, PR_TRUE>>
     ScopedSECItem;
-typedef scoped_ptr<PLArenaPool,
-                   NSSDestroyer1<PLArenaPool, PORT_FreeArena, PR_FALSE> >
+typedef std::unique_ptr<PLArenaPool,
+                        NSSDestroyer1<PLArenaPool, PORT_FreeArena, PR_FALSE>>
     ScopedPLArenaPool;
 
 }  // namespace crypto
diff --git a/crypto/scoped_openssl_types.h b/crypto/scoped_openssl_types.h
index 33b618d..622fed2 100644
--- a/crypto/scoped_openssl_types.h
+++ b/crypto/scoped_openssl_types.h
@@ -17,7 +17,7 @@
 #include <openssl/rsa.h>
 #include <stdint.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
 
 namespace crypto {
 
@@ -31,7 +31,7 @@
 
 template <typename PointerType, void (*Destroyer)(PointerType*)>
 using ScopedOpenSSL =
-    scoped_ptr<PointerType, OpenSSLDestroyer<PointerType, Destroyer>>;
+    std::unique_ptr<PointerType, OpenSSLDestroyer<PointerType, Destroyer>>;
 
 struct OpenSSLFree {
   void operator()(uint8_t* ptr) const { OPENSSL_free(ptr); }
@@ -55,7 +55,7 @@
 using ScopedRSA = ScopedOpenSSL<RSA, RSA_free>;
 
 // The bytes must have been allocated with OPENSSL_malloc.
-using ScopedOpenSSLBytes = scoped_ptr<uint8_t, OpenSSLFree>;
+using ScopedOpenSSLBytes = std::unique_ptr<uint8_t, OpenSSLFree>;
 
 }  // namespace crypto
 
diff --git a/crypto/scoped_test_nss_db.h b/crypto/scoped_test_nss_db.h
index a305b7f..c01653f 100644
--- a/crypto/scoped_test_nss_db.h
+++ b/crypto/scoped_test_nss_db.h
@@ -20,7 +20,7 @@
   ScopedTestNSSDB();
   ~ScopedTestNSSDB();
 
-  bool is_open() const { return slot_; }
+  bool is_open() const { return !!slot_; }
   PK11SlotInfo* slot() const { return slot_.get(); }
 
  private:
diff --git a/crypto/scoped_test_system_nss_key_slot.h b/crypto/scoped_test_system_nss_key_slot.h
index 99a269c..eb8fbc9 100644
--- a/crypto/scoped_test_system_nss_key_slot.h
+++ b/crypto/scoped_test_system_nss_key_slot.h
@@ -5,8 +5,9 @@
 #ifndef CRYPTO_SCOPED_TEST_SYSTEM_NSS_KEY_SLOT_H_
 #define CRYPTO_SCOPED_TEST_SYSTEM_NSS_KEY_SLOT_H_
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "crypto/crypto_export.h"
 
 // Forward declaration, from <pk11pub.h>
@@ -33,7 +34,7 @@
   PK11SlotInfo* slot() const;
 
  private:
-  scoped_ptr<ScopedTestNSSDB> test_db_;
+  std::unique_ptr<ScopedTestNSSDB> test_db_;
 
   DISALLOW_COPY_AND_ASSIGN(ScopedTestSystemNSSKeySlot);
 };
diff --git a/crypto/secure_hash.cc b/crypto/secure_hash.cc
new file mode 100644
index 0000000..9003b9c
--- /dev/null
+++ b/crypto/secure_hash.cc
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crypto/secure_hash.h"
+
+#if defined(OPENSSL_IS_BORINGSSL)
+#include <openssl/mem.h>
+#else
+#include <openssl/crypto.h>
+#endif
+#include <openssl/sha.h>
+#include <stddef.h>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/pickle.h"
+#include "crypto/openssl_util.h"
+
+namespace crypto {
+
+namespace {
+
+class SecureHashSHA256 : public SecureHash {
+ public:
+  SecureHashSHA256() {
+    SHA256_Init(&ctx_);
+  }
+
+  SecureHashSHA256(const SecureHashSHA256& other) : SecureHash() {
+    memcpy(&ctx_, &other.ctx_, sizeof(ctx_));
+  }
+
+  ~SecureHashSHA256() override {
+    OPENSSL_cleanse(&ctx_, sizeof(ctx_));
+  }
+
+  void Update(const void* input, size_t len) override {
+    SHA256_Update(&ctx_, static_cast<const unsigned char*>(input), len);
+  }
+
+  void Finish(void* output, size_t len) override {
+    ScopedOpenSSLSafeSizeBuffer<SHA256_DIGEST_LENGTH> result(
+        static_cast<unsigned char*>(output), len);
+    SHA256_Final(result.safe_buffer(), &ctx_);
+  }
+
+  std::unique_ptr<SecureHash> Clone() const override {
+    return base::MakeUnique<SecureHashSHA256>(*this);
+  }
+
+  size_t GetHashLength() const override { return SHA256_DIGEST_LENGTH; }
+
+ private:
+  SHA256_CTX ctx_;
+};
+
+}  // namespace
+
+std::unique_ptr<SecureHash> SecureHash::Create(Algorithm algorithm) {
+  switch (algorithm) {
+    case SHA256:
+      return base::MakeUnique<SecureHashSHA256>();
+    default:
+      NOTIMPLEMENTED();
+      return nullptr;
+  }
+}
+
+}  // namespace crypto
diff --git a/crypto/secure_hash.h b/crypto/secure_hash.h
index 491a299..30b9fdc 100644
--- a/crypto/secure_hash.h
+++ b/crypto/secure_hash.h
@@ -7,14 +7,11 @@
 
 #include <stddef.h>
 
+#include <memory>
+
 #include "base/macros.h"
 #include "crypto/crypto_export.h"
 
-namespace base {
-class Pickle;
-class PickleIterator;
-}
-
 namespace crypto {
 
 // A wrapper to calculate secure hashes incrementally, allowing to
@@ -26,21 +23,16 @@
   };
   virtual ~SecureHash() {}
 
-  static SecureHash* Create(Algorithm type);
+  static std::unique_ptr<SecureHash> Create(Algorithm type);
 
   virtual void Update(const void* input, size_t len) = 0;
   virtual void Finish(void* output, size_t len) = 0;
+  virtual size_t GetHashLength() const = 0;
 
-  // Serialize the context, so it can be restored at a later time.
-  // |pickle| will contain the serialized data.
-  // Returns whether or not |pickle| was filled.
-  virtual bool Serialize(base::Pickle* pickle) = 0;
-
-  // Restore the context that was saved earlier.
-  // |data_iterator| allows this to be used as part of a larger pickle.
-  // |pickle| holds the saved data.
-  // Returns success or failure.
-  virtual bool Deserialize(base::PickleIterator* data_iterator) = 0;
+  // Create a clone of this SecureHash. The returned clone and this both
+  // represent the same hash state. But from this point on, calling
+  // Update()/Finish() on either doesn't affect the state of the other.
+  virtual std::unique_ptr<SecureHash> Clone() const = 0;
 
  protected:
   SecureHash() {}
diff --git a/crypto/secure_hash_default.cc b/crypto/secure_hash_default.cc
deleted file mode 100644
index cec6fb8..0000000
--- a/crypto/secure_hash_default.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/secure_hash.h"
-
-#include <stddef.h>
-
-#include "base/logging.h"
-#include "base/pickle.h"
-#include "crypto/third_party/nss/chromium-blapi.h"
-#include "crypto/third_party/nss/chromium-sha256.h"
-
-namespace crypto {
-
-namespace {
-
-const char kSHA256Descriptor[] = "NSS";
-
-class SecureHashSHA256NSS : public SecureHash {
- public:
-  static const int kSecureHashVersion = 1;
-
-  SecureHashSHA256NSS() {
-    SHA256_Begin(&ctx_);
-  }
-
-  ~SecureHashSHA256NSS() override { memset(&ctx_, 0, sizeof(ctx_)); }
-
-  // SecureHash implementation:
-  void Update(const void* input, size_t len) override {
-    SHA256_Update(&ctx_, static_cast<const unsigned char*>(input), len);
-  }
-
-  void Finish(void* output, size_t len) override {
-    SHA256_End(&ctx_, static_cast<unsigned char*>(output), NULL,
-               static_cast<unsigned int>(len));
-  }
-
-  bool Serialize(base::Pickle* pickle) override;
-  bool Deserialize(base::PickleIterator* data_iterator) override;
-
- private:
-  SHA256Context ctx_;
-};
-
-bool SecureHashSHA256NSS::Serialize(base::Pickle* pickle) {
-  if (!pickle)
-    return false;
-
-  if (!pickle->WriteInt(kSecureHashVersion) ||
-      !pickle->WriteString(kSHA256Descriptor) ||
-      !pickle->WriteBytes(&ctx_, sizeof(ctx_))) {
-    return false;
-  }
-
-  return true;
-}
-
-bool SecureHashSHA256NSS::Deserialize(base::PickleIterator* data_iterator) {
-  int version;
-  if (!data_iterator->ReadInt(&version))
-    return false;
-
-  if (version > kSecureHashVersion)
-    return false;  // We don't know how to deal with this.
-
-  std::string type;
-  if (!data_iterator->ReadString(&type))
-    return false;
-
-  if (type != kSHA256Descriptor)
-    return false;  // It's the wrong kind.
-
-  const char* data = NULL;
-  if (!data_iterator->ReadBytes(&data, sizeof(ctx_)))
-    return false;
-
-  memcpy(&ctx_, data, sizeof(ctx_));
-
-  return true;
-}
-
-}  // namespace
-
-SecureHash* SecureHash::Create(Algorithm algorithm) {
-  switch (algorithm) {
-    case SHA256:
-      return new SecureHashSHA256NSS();
-    default:
-      NOTIMPLEMENTED();
-      return NULL;
-  }
-}
-
-}  // namespace crypto
diff --git a/crypto/secure_hash_openssl.cc b/crypto/secure_hash_openssl.cc
deleted file mode 100644
index ec859ff..0000000
--- a/crypto/secure_hash_openssl.cc
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/secure_hash.h"
-
-#include <openssl/mem.h>
-#include <openssl/sha.h>
-#include <stddef.h>
-
-#include "base/logging.h"
-#include "base/pickle.h"
-#include "crypto/openssl_util.h"
-
-namespace crypto {
-
-namespace {
-
-const char kSHA256Descriptor[] = "OpenSSL";
-
-class SecureHashSHA256OpenSSL : public SecureHash {
- public:
-  static const int kSecureHashVersion = 1;
-
-  SecureHashSHA256OpenSSL() {
-    SHA256_Init(&ctx_);
-  }
-
-  ~SecureHashSHA256OpenSSL() override {
-    OPENSSL_cleanse(&ctx_, sizeof(ctx_));
-  }
-
-  void Update(const void* input, size_t len) override {
-    SHA256_Update(&ctx_, static_cast<const unsigned char*>(input), len);
-  }
-
-  void Finish(void* output, size_t len) override {
-    ScopedOpenSSLSafeSizeBuffer<SHA256_DIGEST_LENGTH> result(
-        static_cast<unsigned char*>(output), len);
-    SHA256_Final(result.safe_buffer(), &ctx_);
-  }
-
-  bool Serialize(base::Pickle* pickle) override;
-  bool Deserialize(base::PickleIterator* data_iterator) override;
-
- private:
-  SHA256_CTX ctx_;
-};
-
-bool SecureHashSHA256OpenSSL::Serialize(base::Pickle* pickle) {
-  if (!pickle)
-    return false;
-
-  if (!pickle->WriteInt(kSecureHashVersion) ||
-      !pickle->WriteString(kSHA256Descriptor) ||
-      !pickle->WriteBytes(&ctx_, sizeof(ctx_))) {
-    return false;
-  }
-
-  return true;
-}
-
-bool SecureHashSHA256OpenSSL::Deserialize(base::PickleIterator* data_iterator) {
-  if (!data_iterator)
-    return false;
-
-  int version;
-  if (!data_iterator->ReadInt(&version))
-    return false;
-
-  if (version > kSecureHashVersion)
-    return false;  // We don't know how to deal with this.
-
-  std::string type;
-  if (!data_iterator->ReadString(&type))
-    return false;
-
-  if (type != kSHA256Descriptor)
-    return false;  // It's the wrong kind.
-
-  const char* data = NULL;
-  if (!data_iterator->ReadBytes(&data, sizeof(ctx_)))
-    return false;
-
-  memcpy(&ctx_, data, sizeof(ctx_));
-
-  return true;
-}
-
-}  // namespace
-
-SecureHash* SecureHash::Create(Algorithm algorithm) {
-  switch (algorithm) {
-    case SHA256:
-      return new SecureHashSHA256OpenSSL();
-    default:
-      NOTIMPLEMENTED();
-      return NULL;
-  }
-}
-
-}  // namespace crypto
diff --git a/crypto/secure_hash_unittest.cc b/crypto/secure_hash_unittest.cc
index df0afa6..cb9f585 100644
--- a/crypto/secure_hash_unittest.cc
+++ b/crypto/secure_hash_unittest.cc
@@ -7,71 +7,76 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 
-#include "base/memory/scoped_ptr.h"
-#include "base/pickle.h"
 #include "crypto/sha2.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 TEST(SecureHashTest, TestUpdate) {
   // Example B.3 from FIPS 180-2: long message.
   std::string input3(500000, 'a');  // 'a' repeated half a million times
-  int expected3[] = { 0xcd, 0xc7, 0x6e, 0x5c,
-                      0x99, 0x14, 0xfb, 0x92,
-                      0x81, 0xa1, 0xc7, 0xe2,
-                      0x84, 0xd7, 0x3e, 0x67,
-                      0xf1, 0x80, 0x9a, 0x48,
-                      0xa4, 0x97, 0x20, 0x0e,
-                      0x04, 0x6d, 0x39, 0xcc,
-                      0xc7, 0x11, 0x2c, 0xd0 };
+  const int kExpectedHashOfInput3[] = {
+      0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92, 0x81, 0xa1, 0xc7,
+      0xe2, 0x84, 0xd7, 0x3e, 0x67, 0xf1, 0x80, 0x9a, 0x48, 0xa4, 0x97,
+      0x20, 0x0e, 0x04, 0x6d, 0x39, 0xcc, 0xc7, 0x11, 0x2c, 0xd0};
 
   uint8_t output3[crypto::kSHA256Length];
 
-  scoped_ptr<crypto::SecureHash> ctx(crypto::SecureHash::Create(
-      crypto::SecureHash::SHA256));
+  std::unique_ptr<crypto::SecureHash> ctx(
+      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
   ctx->Update(input3.data(), input3.size());
   ctx->Update(input3.data(), input3.size());
 
   ctx->Finish(output3, sizeof(output3));
   for (size_t i = 0; i < crypto::kSHA256Length; i++)
-    EXPECT_EQ(expected3[i], static_cast<int>(output3[i]));
+    EXPECT_EQ(kExpectedHashOfInput3[i], static_cast<int>(output3[i]));
 }
 
-// Save the crypto state mid-stream, and create another instance with the
-// saved state.  Then feed the same data afterwards to both.
-// When done, both should have the same hash value.
-TEST(SecureHashTest, TestSerialization) {
+TEST(SecureHashTest, TestClone) {
   std::string input1(10001, 'a');  // 'a' repeated 10001 times
-  std::string input2(10001, 'b');  // 'b' repeated 10001 times
-  std::string input3(10001, 'c');  // 'c' repeated 10001 times
-  std::string input4(10001, 'd');  // 'd' repeated 10001 times
-  std::string input5(10001, 'e');  // 'e' repeated 10001 times
+  std::string input2(10001, 'd');  // 'd' repeated 10001 times
+
+  const uint8_t kExpectedHashOfInput1[crypto::kSHA256Length] = {
+      0x0c, 0xab, 0x99, 0xa0, 0x58, 0x60, 0x0f, 0xfa, 0xad, 0x12, 0x92,
+      0xd0, 0xc5, 0x3c, 0x05, 0x48, 0xeb, 0xaf, 0x88, 0xdd, 0x1d, 0x01,
+      0x03, 0x03, 0x45, 0x70, 0x5f, 0x01, 0x8a, 0x81, 0x39, 0x09};
+  const uint8_t kExpectedHashOfInput1And2[crypto::kSHA256Length] = {
+      0x4c, 0x8e, 0x26, 0x5a, 0xc3, 0x85, 0x1f, 0x1f, 0xa5, 0x04, 0x1c,
+      0xc7, 0x88, 0x53, 0x1c, 0xc7, 0x80, 0x47, 0x15, 0xfb, 0x47, 0xff,
+      0x72, 0xb1, 0x28, 0x37, 0xb0, 0x4d, 0x6e, 0x22, 0x2e, 0x4d};
 
   uint8_t output1[crypto::kSHA256Length];
   uint8_t output2[crypto::kSHA256Length];
+  uint8_t output3[crypto::kSHA256Length];
 
-  scoped_ptr<crypto::SecureHash> ctx1(crypto::SecureHash::Create(
-      crypto::SecureHash::SHA256));
-  scoped_ptr<crypto::SecureHash> ctx2(crypto::SecureHash::Create(
-      crypto::SecureHash::SHA256));
-  base::Pickle pickle;
+  std::unique_ptr<crypto::SecureHash> ctx1(
+      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
   ctx1->Update(input1.data(), input1.size());
+
+  std::unique_ptr<crypto::SecureHash> ctx2(ctx1->Clone());
+  std::unique_ptr<crypto::SecureHash> ctx3(ctx2->Clone());
+  // At this point, ctx1, ctx2, and ctx3 are all equivalent and represent the
+  // state after hashing input1.
+
+  // Updating ctx1 and ctx2 with input2 should produce equivalent results.
   ctx1->Update(input2.data(), input2.size());
-  ctx1->Update(input3.data(), input3.size());
-
-  EXPECT_TRUE(ctx1->Serialize(&pickle));
-  ctx1->Update(input4.data(), input4.size());
-  ctx1->Update(input5.data(), input5.size());
-
   ctx1->Finish(output1, sizeof(output1));
 
-  base::PickleIterator data_iterator(pickle);
-  EXPECT_TRUE(ctx2->Deserialize(&data_iterator));
-  ctx2->Update(input4.data(), input4.size());
-  ctx2->Update(input5.data(), input5.size());
-
+  ctx2->Update(input2.data(), input2.size());
   ctx2->Finish(output2, sizeof(output2));
 
   EXPECT_EQ(0, memcmp(output1, output2, crypto::kSHA256Length));
+  EXPECT_EQ(0,
+            memcmp(output1, kExpectedHashOfInput1And2, crypto::kSHA256Length));
+
+  // Finish() ctx3, which should produce the hash of input1.
+  ctx3->Finish(&output3, sizeof(output3));
+  EXPECT_EQ(0, memcmp(output3, kExpectedHashOfInput1, crypto::kSHA256Length));
+}
+
+TEST(SecureHashTest, TestLength) {
+  std::unique_ptr<crypto::SecureHash> ctx(
+      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
+  EXPECT_EQ(crypto::kSHA256Length, ctx->GetHashLength());
 }
diff --git a/crypto/sha2.cc b/crypto/sha2.cc
index 2646d1b..e97b8f4 100644
--- a/crypto/sha2.cc
+++ b/crypto/sha2.cc
@@ -6,14 +6,15 @@
 
 #include <stddef.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "base/stl_util.h"
 #include "crypto/secure_hash.h"
 
 namespace crypto {
 
 void SHA256HashString(const base::StringPiece& str, void* output, size_t len) {
-  scoped_ptr<SecureHash> ctx(SecureHash::Create(SecureHash::SHA256));
+  std::unique_ptr<SecureHash> ctx(SecureHash::Create(SecureHash::SHA256));
   ctx->Update(str.data(), str.length());
   ctx->Finish(output, len);
 }
diff --git a/crypto/signature_creator.h b/crypto/signature_creator.h
index abd1546..1e8e856 100644
--- a/crypto/signature_creator.h
+++ b/crypto/signature_creator.h
@@ -7,6 +7,7 @@
 
 #include <stdint.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/macros.h"
@@ -40,8 +41,8 @@
   // Create an instance. The caller must ensure that the provided PrivateKey
   // instance outlives the created SignatureCreator. Uses the HashAlgorithm
   // specified.
-  static SignatureCreator* Create(RSAPrivateKey* key, HashAlgorithm hash_alg);
-
+  static std::unique_ptr<SignatureCreator> Create(RSAPrivateKey* key,
+                                                  HashAlgorithm hash_alg);
 
   // Signs the precomputed |hash_alg| digest |data| using private |key| as
   // specified in PKCS #1 v1.5.
diff --git a/crypto/signature_creator_openssl.cc b/crypto/signature_creator_openssl.cc
deleted file mode 100644
index d5fc4d4..0000000
--- a/crypto/signature_creator_openssl.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/signature_creator.h"
-
-#include <openssl/evp.h>
-#include <openssl/rsa.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "crypto/openssl_util.h"
-#include "crypto/rsa_private_key.h"
-#include "crypto/scoped_openssl_types.h"
-
-namespace crypto {
-
-namespace {
-
-const EVP_MD* ToOpenSSLDigest(SignatureCreator::HashAlgorithm hash_alg) {
-  switch (hash_alg) {
-    case SignatureCreator::SHA1:
-      return EVP_sha1();
-    case SignatureCreator::SHA256:
-      return EVP_sha256();
-  }
-  return NULL;
-}
-
-int ToOpenSSLDigestType(SignatureCreator::HashAlgorithm hash_alg) {
-  switch (hash_alg) {
-    case SignatureCreator::SHA1:
-      return NID_sha1;
-    case SignatureCreator::SHA256:
-      return NID_sha256;
-  }
-  return NID_undef;
-}
-
-}  // namespace
-
-// static
-SignatureCreator* SignatureCreator::Create(RSAPrivateKey* key,
-                                           HashAlgorithm hash_alg) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  scoped_ptr<SignatureCreator> result(new SignatureCreator);
-  const EVP_MD* const digest = ToOpenSSLDigest(hash_alg);
-  DCHECK(digest);
-  if (!digest) {
-    return NULL;
-  }
-  if (!EVP_DigestSignInit(result->sign_context_, NULL, digest, NULL,
-                          key->key())) {
-    return NULL;
-  }
-  return result.release();
-}
-
-// static
-bool SignatureCreator::Sign(RSAPrivateKey* key,
-                            HashAlgorithm hash_alg,
-                            const uint8_t* data,
-                            int data_len,
-                            std::vector<uint8_t>* signature) {
-  ScopedRSA rsa_key(EVP_PKEY_get1_RSA(key->key()));
-  if (!rsa_key)
-    return false;
-  signature->resize(RSA_size(rsa_key.get()));
-
-  unsigned int len = 0;
-  if (!RSA_sign(ToOpenSSLDigestType(hash_alg), data, data_len,
-                signature->data(), &len, rsa_key.get())) {
-    signature->clear();
-    return false;
-  }
-  signature->resize(len);
-  return true;
-}
-
-SignatureCreator::SignatureCreator()
-    : sign_context_(EVP_MD_CTX_create()) {
-}
-
-SignatureCreator::~SignatureCreator() {
-  EVP_MD_CTX_destroy(sign_context_);
-}
-
-bool SignatureCreator::Update(const uint8_t* data_part, int data_part_len) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  return !!EVP_DigestSignUpdate(sign_context_, data_part, data_part_len);
-}
-
-bool SignatureCreator::Final(std::vector<uint8_t>* signature) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-
-  // Determine the maximum length of the signature.
-  size_t len = 0;
-  if (!EVP_DigestSignFinal(sign_context_, NULL, &len)) {
-    signature->clear();
-    return false;
-  }
-  signature->resize(len);
-
-  // Sign it.
-  if (!EVP_DigestSignFinal(sign_context_, signature->data(), &len)) {
-    signature->clear();
-    return false;
-  }
-  signature->resize(len);
-  return true;
-}
-
-}  // namespace crypto
diff --git a/crypto/signature_creator_unittest.cc b/crypto/signature_creator_unittest.cc
index af1a042..819e663 100644
--- a/crypto/signature_creator_unittest.cc
+++ b/crypto/signature_creator_unittest.cc
@@ -2,44 +2,32 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "crypto/signature_creator.h"
+
 #include <stdint.h>
 
+#include <memory>
 #include <vector>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/sha1.h"
 #include "crypto/rsa_private_key.h"
 #include "crypto/sha2.h"
-#include "crypto/signature_creator.h"
 #include "crypto/signature_verifier.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-namespace {
-
-// This is the algorithm ID for SHA-1 with RSA encryption.
-const uint8_t kSHA1WithRSAAlgorithmID[] = {0x30, 0x0d, 0x06, 0x09, 0x2a,
-                                           0x86, 0x48, 0x86, 0xf7, 0x0d,
-                                           0x01, 0x01, 0x05, 0x05, 0x00};
-
-// This is the algorithm ID for SHA-1 with RSA encryption.
-const uint8_t kSHA256WithRSAAlgorithmID[] = {0x30, 0x0d, 0x06, 0x09, 0x2a,
-                                             0x86, 0x48, 0x86, 0xf7, 0x0d,
-                                             0x01, 0x01, 0x0B, 0x05, 0x00};
-}
-
 TEST(SignatureCreatorTest, BasicTest) {
   // Do a verify round trip.
-  scoped_ptr<crypto::RSAPrivateKey> key_original(
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
       crypto::RSAPrivateKey::Create(1024));
   ASSERT_TRUE(key_original.get());
 
   std::vector<uint8_t> key_info;
   key_original->ExportPrivateKey(&key_info);
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
   ASSERT_TRUE(key.get());
 
-  scoped_ptr<crypto::SignatureCreator> signer(
+  std::unique_ptr<crypto::SignatureCreator> signer(
       crypto::SignatureCreator::Create(key.get(),
                                        crypto::SignatureCreator::SHA1));
   ASSERT_TRUE(signer.get());
@@ -56,9 +44,8 @@
 
   crypto::SignatureVerifier verifier;
   ASSERT_TRUE(verifier.VerifyInit(
-      kSHA1WithRSAAlgorithmID, sizeof(kSHA1WithRSAAlgorithmID),
-      &signature.front(), signature.size(),
-      &public_key_info.front(), public_key_info.size()));
+      crypto::SignatureVerifier::RSA_PKCS1_SHA1, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
 
   verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
                         data.size());
@@ -67,13 +54,13 @@
 
 TEST(SignatureCreatorTest, SignDigestTest) {
   // Do a verify round trip.
-  scoped_ptr<crypto::RSAPrivateKey> key_original(
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
       crypto::RSAPrivateKey::Create(1024));
   ASSERT_TRUE(key_original.get());
 
   std::vector<uint8_t> key_info;
   key_original->ExportPrivateKey(&key_info);
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
   ASSERT_TRUE(key.get());
 
@@ -91,9 +78,8 @@
   // Verify the input data.
   crypto::SignatureVerifier verifier;
   ASSERT_TRUE(verifier.VerifyInit(
-      kSHA1WithRSAAlgorithmID, sizeof(kSHA1WithRSAAlgorithmID),
-      &signature.front(), signature.size(),
-      &public_key_info.front(), public_key_info.size()));
+      crypto::SignatureVerifier::RSA_PKCS1_SHA1, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
 
   verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
                         data.size());
@@ -102,13 +88,13 @@
 
 TEST(SignatureCreatorTest, SignSHA256DigestTest) {
   // Do a verify round trip.
-  scoped_ptr<crypto::RSAPrivateKey> key_original(
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
       crypto::RSAPrivateKey::Create(1024));
   ASSERT_TRUE(key_original.get());
 
   std::vector<uint8_t> key_info;
   key_original->ExportPrivateKey(&key_info);
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
   ASSERT_TRUE(key.get());
 
@@ -127,9 +113,8 @@
   // Verify the input data.
   crypto::SignatureVerifier verifier;
   ASSERT_TRUE(verifier.VerifyInit(
-      kSHA256WithRSAAlgorithmID, sizeof(kSHA256WithRSAAlgorithmID),
-      &signature.front(), signature.size(),
-      &public_key_info.front(), public_key_info.size()));
+      crypto::SignatureVerifier::RSA_PKCS1_SHA256, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
 
   verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
                         data.size());
diff --git a/crypto/signature_verifier.h b/crypto/signature_verifier.h
index b26a0df..5b7369f 100644
--- a/crypto/signature_verifier.h
+++ b/crypto/signature_verifier.h
@@ -33,6 +33,13 @@
     SHA256,
   };
 
+  // The set of supported signature algorithms. Extend as required.
+  enum SignatureAlgorithm {
+    RSA_PKCS1_SHA1,
+    RSA_PKCS1_SHA256,
+    ECDSA_SHA256,
+  };
+
   SignatureVerifier();
   ~SignatureVerifier();
 
@@ -42,16 +49,7 @@
   // by one or more VerifyUpdate calls and a VerifyFinal call.
   // NOTE: for RSA-PSS signatures, use VerifyInitRSAPSS instead.
   //
-  // The signature algorithm is specified as a DER encoded ASN.1
-  // AlgorithmIdentifier structure:
-  //   AlgorithmIdentifier  ::=  SEQUENCE  {
-  //       algorithm               OBJECT IDENTIFIER,
-  //       parameters              ANY DEFINED BY algorithm OPTIONAL  }
-  //
-  // The signature is encoded according to the signature algorithm, but it
-  // must not be further encoded in an ASN.1 BIT STRING.
-  // Note: An RSA signature is actually a big integer.  It must be in
-  // big-endian byte order.
+  // The signature is encoded according to the signature algorithm.
   //
   // The public key is specified as a DER encoded ASN.1 SubjectPublicKeyInfo
   // structure, which contains not only the public key but also its type
@@ -59,8 +57,7 @@
   //   SubjectPublicKeyInfo  ::=  SEQUENCE  {
   //       algorithm            AlgorithmIdentifier,
   //       subjectPublicKey     BIT STRING  }
-  bool VerifyInit(const uint8_t* signature_algorithm,
-                  int signature_algorithm_len,
+  bool VerifyInit(SignatureAlgorithm signature_algorithm,
                   const uint8_t* signature,
                   int signature_len,
                   const uint8_t* public_key_info,
@@ -98,19 +95,10 @@
   // error occurred.
   bool VerifyFinal();
 
-  // Note: we can provide a one-shot interface if there is interest:
-  //   bool Verify(const uint8_t* data,
-  //               int data_len,
-  //               const uint8_t* signature_algorithm,
-  //               int signature_algorithm_len,
-  //               const uint8_t* signature,
-  //               int signature_len,
-  //               const uint8_t* public_key_info,
-  //               int public_key_info_len);
-
  private:
 #if defined(USE_OPENSSL)
-  bool CommonInit(const EVP_MD* digest,
+  bool CommonInit(int pkey_type,
+                  const EVP_MD* digest,
                   const uint8_t* signature,
                   int signature_len,
                   const uint8_t* public_key_info,
diff --git a/crypto/signature_verifier_nss.cc b/crypto/signature_verifier_nss.cc
index e6cd3e0..edbd3f6 100644
--- a/crypto/signature_verifier_nss.cc
+++ b/crypto/signature_verifier_nss.cc
@@ -30,6 +30,18 @@
   return HASH_AlgNULL;
 }
 
+SECOidTag ToNSSSignatureType(SignatureVerifier::SignatureAlgorithm sig_alg) {
+  switch (sig_alg) {
+    case SignatureVerifier::RSA_PKCS1_SHA1:
+      return SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION;
+    case SignatureVerifier::RSA_PKCS1_SHA256:
+      return SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
+    case SignatureVerifier::ECDSA_SHA256:
+      return SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE;
+  }
+  return SEC_OID_UNKNOWN;
+}
+
 SECStatus VerifyRSAPSS_End(SECKEYPublicKey* public_key,
                            HASHContext* hash_context,
                            HASH_HashType mask_hash_alg,
@@ -74,8 +86,7 @@
   Reset();
 }
 
-bool SignatureVerifier::VerifyInit(const uint8_t* signature_algorithm,
-                                   int signature_algorithm_len,
+bool SignatureVerifier::VerifyInit(SignatureAlgorithm signature_algorithm,
                                    const uint8_t* signature,
                                    int signature_len,
                                    const uint8_t* public_key_info,
@@ -90,37 +101,13 @@
   if (!public_key)
     return false;
 
-  PLArenaPool* arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE);
-  if (!arena) {
-    SECKEY_DestroyPublicKey(public_key);
-    return false;
-  }
-
-  SECItem sig_alg_der;
-  sig_alg_der.type = siBuffer;
-  sig_alg_der.data = const_cast<uint8_t*>(signature_algorithm);
-  sig_alg_der.len = signature_algorithm_len;
-  SECAlgorithmID sig_alg_id;
-  SECStatus rv;
-  rv = SEC_QuickDERDecodeItem(arena, &sig_alg_id,
-                              SEC_ASN1_GET(SECOID_AlgorithmIDTemplate),
-                              &sig_alg_der);
-  if (rv != SECSuccess) {
-    SECKEY_DestroyPublicKey(public_key);
-    PORT_FreeArena(arena, PR_TRUE);
-    return false;
-  }
-
   SECItem sig;
   sig.type = siBuffer;
   sig.data = const_cast<uint8_t*>(signature);
   sig.len = signature_len;
-  SECOidTag hash_alg_tag;
-  vfy_context_ = VFY_CreateContextWithAlgorithmID(public_key, &sig,
-                                                  &sig_alg_id, &hash_alg_tag,
-                                                  NULL);
+  vfy_context_ = VFY_CreateContext(
+      public_key, &sig, ToNSSSignatureType(signature_algorithm), nullptr);
   SECKEY_DestroyPublicKey(public_key);  // Done with public_key.
-  PORT_FreeArena(arena, PR_TRUE);  // Done with sig_alg_id.
   if (!vfy_context_) {
     // A corrupted RSA signature could be detected without the data, so
     // VFY_CreateContextWithAlgorithmID may fail with SEC_ERROR_BAD_SIGNATURE
@@ -128,8 +115,7 @@
     return false;
   }
 
-  rv = VFY_Begin(vfy_context_);
-  if (rv != SECSuccess) {
+  if (VFY_Begin(vfy_context_) != SECSuccess) {
     NOTREACHED();
     return false;
   }
diff --git a/crypto/signature_verifier_openssl.cc b/crypto/signature_verifier_openssl.cc
deleted file mode 100644
index a756149..0000000
--- a/crypto/signature_verifier_openssl.cc
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/signature_verifier.h"
-
-#include <openssl/evp.h>
-#include <openssl/x509.h>
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "crypto/openssl_util.h"
-#include "crypto/scoped_openssl_types.h"
-
-namespace crypto {
-
-namespace {
-
-const EVP_MD* ToOpenSSLDigest(SignatureVerifier::HashAlgorithm hash_alg) {
-  switch (hash_alg) {
-    case SignatureVerifier::SHA1:
-      return EVP_sha1();
-    case SignatureVerifier::SHA256:
-      return EVP_sha256();
-  }
-  return NULL;
-}
-
-}  // namespace
-
-struct SignatureVerifier::VerifyContext {
-  ScopedEVP_MD_CTX ctx;
-};
-
-SignatureVerifier::SignatureVerifier()
-    : verify_context_(NULL) {
-}
-
-SignatureVerifier::~SignatureVerifier() {
-  Reset();
-}
-
-bool SignatureVerifier::VerifyInit(const uint8_t* signature_algorithm,
-                                   int signature_algorithm_len,
-                                   const uint8_t* signature,
-                                   int signature_len,
-                                   const uint8_t* public_key_info,
-                                   int public_key_info_len) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  ScopedOpenSSL<X509_ALGOR, X509_ALGOR_free> algorithm(
-      d2i_X509_ALGOR(NULL, &signature_algorithm, signature_algorithm_len));
-  if (!algorithm.get())
-    return false;
-  int nid = OBJ_obj2nid(algorithm.get()->algorithm);
-  const EVP_MD* digest;
-  if (nid == NID_ecdsa_with_SHA1) {
-    digest = EVP_sha1();
-  } else if (nid == NID_ecdsa_with_SHA256) {
-    digest = EVP_sha256();
-  } else {
-    // This works for PKCS #1 v1.5 RSA signatures, but not for ECDSA
-    // signatures.
-    digest = EVP_get_digestbyobj(algorithm.get()->algorithm);
-  }
-  if (!digest)
-    return false;
-
-  return CommonInit(digest, signature, signature_len, public_key_info,
-                    public_key_info_len, NULL);
-}
-
-bool SignatureVerifier::VerifyInitRSAPSS(HashAlgorithm hash_alg,
-                                         HashAlgorithm mask_hash_alg,
-                                         int salt_len,
-                                         const uint8_t* signature,
-                                         int signature_len,
-                                         const uint8_t* public_key_info,
-                                         int public_key_info_len) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  const EVP_MD* const digest = ToOpenSSLDigest(hash_alg);
-  DCHECK(digest);
-  if (!digest) {
-    return false;
-  }
-
-  EVP_PKEY_CTX* pkey_ctx;
-  if (!CommonInit(digest, signature, signature_len, public_key_info,
-                  public_key_info_len, &pkey_ctx)) {
-    return false;
-  }
-
-  int rv = EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, RSA_PKCS1_PSS_PADDING);
-  if (rv != 1)
-    return false;
-  const EVP_MD* const mgf_digest = ToOpenSSLDigest(mask_hash_alg);
-  DCHECK(mgf_digest);
-  if (!mgf_digest) {
-    return false;
-  }
-  rv = EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf_digest);
-  if (rv != 1)
-    return false;
-  rv = EVP_PKEY_CTX_set_rsa_pss_saltlen(pkey_ctx, salt_len);
-  return rv == 1;
-}
-
-void SignatureVerifier::VerifyUpdate(const uint8_t* data_part,
-                                     int data_part_len) {
-  DCHECK(verify_context_);
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  int rv = EVP_DigestVerifyUpdate(verify_context_->ctx.get(),
-                                  data_part, data_part_len);
-  DCHECK_EQ(rv, 1);
-}
-
-bool SignatureVerifier::VerifyFinal() {
-  DCHECK(verify_context_);
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  int rv = EVP_DigestVerifyFinal(verify_context_->ctx.get(), signature_.data(),
-                                 signature_.size());
-  DCHECK_EQ(static_cast<int>(!!rv), rv);
-  Reset();
-  return rv == 1;
-}
-
-bool SignatureVerifier::CommonInit(const EVP_MD* digest,
-                                   const uint8_t* signature,
-                                   int signature_len,
-                                   const uint8_t* public_key_info,
-                                   int public_key_info_len,
-                                   EVP_PKEY_CTX** pkey_ctx) {
-  if (verify_context_)
-    return false;
-
-  verify_context_ = new VerifyContext;
-
-  signature_.assign(signature, signature + signature_len);
-
-  const uint8_t* ptr = public_key_info;
-  ScopedEVP_PKEY public_key(d2i_PUBKEY(nullptr, &ptr, public_key_info_len));
-  if (!public_key.get() || ptr != public_key_info + public_key_info_len)
-    return false;
-
-  verify_context_->ctx.reset(EVP_MD_CTX_create());
-  int rv = EVP_DigestVerifyInit(verify_context_->ctx.get(), pkey_ctx,
-                                digest, nullptr, public_key.get());
-  return rv == 1;
-}
-
-void SignatureVerifier::Reset() {
-  delete verify_context_;
-  verify_context_ = NULL;
-  signature_.clear();
-}
-
-}  // namespace crypto
diff --git a/crypto/signature_verifier_unittest.cc b/crypto/signature_verifier_unittest.cc
index adcc885..d71ea82 100644
--- a/crypto/signature_verifier_unittest.cc
+++ b/crypto/signature_verifier_unittest.cc
@@ -14,9 +14,9 @@
 TEST(SignatureVerifierTest, BasicTest) {
   // The input data in this test comes from real certificates.
   //
-  // tbs_certificate ("to-be-signed certificate", the part of a certificate
-  // that is signed), signature_algorithm, and algorithm come from the
-  // certificate of bugs.webkit.org.
+  // tbs_certificate ("to-be-signed certificate", the part of a certificate that
+  // is signed), signature, and algorithm come from the certificate of
+  // bugs.webkit.org.
   //
   // public_key_info comes from the certificate of the issuer, Go Daddy Secure
   // Certification Authority.
@@ -116,19 +116,6 @@
       0x74, 0x2e, 0x6f, 0x72, 0x67, 0x82, 0x0a, 0x77, 0x65, 0x62, 0x6b, 0x69,
       0x74, 0x2e, 0x6f, 0x72, 0x67};
 
-  // The signature algorithm is specified as the following ASN.1 structure:
-  //    AlgorithmIdentifier  ::=  SEQUENCE  {
-  //        algorithm               OBJECT IDENTIFIER,
-  //        parameters              ANY DEFINED BY algorithm OPTIONAL  }
-  //
-  const uint8_t signature_algorithm[15] = {
-      0x30, 0x0d,  // a SEQUENCE of length 13 (0xd)
-      0x06, 0x09,  // an OBJECT IDENTIFIER of length 9
-      // 1.2.840.113549.1.1.5 - sha1WithRSAEncryption
-      0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05,
-      0x00,  // a NULL of length 0
-  };
-
   // RSA signature, a big integer in the big-endian byte order.
   const uint8_t signature[256] = {
       0x1e, 0x6a, 0xe7, 0xe0, 0x4f, 0xe7, 0x4d, 0xd0, 0x69, 0x7c, 0xf8, 0x8f,
@@ -202,12 +189,11 @@
   crypto::SignatureVerifier verifier;
   bool ok;
 
-  // Test 1: feed all of the data to the verifier at once (a single
+  // Test 1: feed all of the data to the verifier at once (a single
   // VerifyUpdate call).
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           public_key_info, sizeof(public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), public_key_info,
+                           sizeof(public_key_info));
   EXPECT_TRUE(ok);
   verifier.VerifyUpdate(tbs_certificate, sizeof(tbs_certificate));
   ok = verifier.VerifyFinal();
@@ -215,12 +201,11 @@
 
   // Test 2: feed the data to the verifier in three parts (three VerifyUpdate
   // calls).
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           public_key_info, sizeof(public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), public_key_info,
+                           sizeof(public_key_info));
   EXPECT_TRUE(ok);
-  verifier.VerifyUpdate(tbs_certificate,       256);
+  verifier.VerifyUpdate(tbs_certificate, 256);
   verifier.VerifyUpdate(tbs_certificate + 256, 256);
   verifier.VerifyUpdate(tbs_certificate + 512, sizeof(tbs_certificate) - 512);
   ok = verifier.VerifyFinal();
@@ -230,10 +215,9 @@
   uint8_t bad_tbs_certificate[sizeof(tbs_certificate)];
   memcpy(bad_tbs_certificate, tbs_certificate, sizeof(tbs_certificate));
   bad_tbs_certificate[10] += 1;  // Corrupt one byte of the data.
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           public_key_info, sizeof(public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), public_key_info,
+                           sizeof(public_key_info));
   EXPECT_TRUE(ok);
   verifier.VerifyUpdate(bad_tbs_certificate, sizeof(bad_tbs_certificate));
   ok = verifier.VerifyFinal();
@@ -243,8 +227,7 @@
   uint8_t bad_signature[sizeof(signature)];
   memcpy(bad_signature, signature, sizeof(signature));
   bad_signature[10] += 1;  // Corrupt one byte of the signature.
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1,
                            bad_signature, sizeof(bad_signature),
                            public_key_info, sizeof(public_key_info));
 
@@ -260,20 +243,18 @@
   uint8_t bad_public_key_info[sizeof(public_key_info)];
   memcpy(bad_public_key_info, public_key_info, sizeof(public_key_info));
   bad_public_key_info[0] += 1;  // Corrupt part of the SPKI syntax.
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           bad_public_key_info, sizeof(bad_public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), bad_public_key_info,
+                           sizeof(bad_public_key_info));
   EXPECT_FALSE(ok);
 
   // Test 6: import a key with extra data.
   uint8_t long_public_key_info[sizeof(public_key_info) + 5];
   memset(long_public_key_info, 0, sizeof(long_public_key_info));
   memcpy(long_public_key_info, public_key_info, sizeof(public_key_info));
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           long_public_key_info, sizeof(long_public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), long_public_key_info,
+                           sizeof(long_public_key_info));
   EXPECT_FALSE(ok);
 }
 
@@ -1022,7 +1003,7 @@
   //       algorithm            AlgorithmIdentifier,
   //       subjectPublicKey     BIT STRING  }
   //
-  // The signature algorithm is specified as the following ASN.1 structure:
+  // The algorithm is specified as the following ASN.1 structure:
   //    AlgorithmIdentifier  ::=  SEQUENCE  {
   //        algorithm               OBJECT IDENTIFIER,
   //        parameters              ANY DEFINED BY algorithm OPTIONAL  }
diff --git a/crypto/symmetric_key_openssl.cc b/crypto/symmetric_key.cc
similarity index 68%
rename from crypto/symmetric_key_openssl.cc
rename to crypto/symmetric_key.cc
index 2c5358f..e3ecf62 100644
--- a/crypto/symmetric_key_openssl.cc
+++ b/crypto/symmetric_key.cc
@@ -10,9 +10,9 @@
 #include <stdint.h>
 
 #include <algorithm>
+#include <utility>
 
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_util.h"
 #include "crypto/openssl_util.h"
 
@@ -23,37 +23,39 @@
 }
 
 // static
-SymmetricKey* SymmetricKey::GenerateRandomKey(Algorithm algorithm,
-                                              size_t key_size_in_bits) {
+std::unique_ptr<SymmetricKey> SymmetricKey::GenerateRandomKey(
+    Algorithm algorithm,
+    size_t key_size_in_bits) {
   DCHECK_EQ(AES, algorithm);
 
   // Whitelist supported key sizes to avoid accidentaly relying on
   // algorithms available in NSS but not BoringSSL and vice
   // versa. Note that BoringSSL does not support AES-192.
   if (key_size_in_bits != 128 && key_size_in_bits != 256)
-    return NULL;
+    return nullptr;
 
   size_t key_size_in_bytes = key_size_in_bits / 8;
   DCHECK_EQ(key_size_in_bits, key_size_in_bytes * 8);
 
   if (key_size_in_bytes == 0)
-    return NULL;
+    return nullptr;
 
   OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  scoped_ptr<SymmetricKey> key(new SymmetricKey);
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
   uint8_t* key_data = reinterpret_cast<uint8_t*>(
       base::WriteInto(&key->key_, key_size_in_bytes + 1));
 
   int rv = RAND_bytes(key_data, static_cast<int>(key_size_in_bytes));
-  return rv == 1 ? key.release() : NULL;
+  return rv == 1 ? std::move(key) : nullptr;
 }
 
 // static
-SymmetricKey* SymmetricKey::DeriveKeyFromPassword(Algorithm algorithm,
-                                                  const std::string& password,
-                                                  const std::string& salt,
-                                                  size_t iterations,
-                                                  size_t key_size_in_bits) {
+std::unique_ptr<SymmetricKey> SymmetricKey::DeriveKeyFromPassword(
+    Algorithm algorithm,
+    const std::string& password,
+    const std::string& salt,
+    size_t iterations,
+    size_t key_size_in_bits) {
   DCHECK(algorithm == AES || algorithm == HMAC_SHA1);
 
   if (algorithm == AES) {
@@ -61,40 +63,41 @@
     // algorithms available in NSS but not BoringSSL and vice
     // versa. Note that BoringSSL does not support AES-192.
     if (key_size_in_bits != 128 && key_size_in_bits != 256)
-      return NULL;
+      return nullptr;
   }
 
   size_t key_size_in_bytes = key_size_in_bits / 8;
   DCHECK_EQ(key_size_in_bits, key_size_in_bytes * 8);
 
   if (key_size_in_bytes == 0)
-    return NULL;
+    return nullptr;
 
   OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  scoped_ptr<SymmetricKey> key(new SymmetricKey);
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
   uint8_t* key_data = reinterpret_cast<uint8_t*>(
       base::WriteInto(&key->key_, key_size_in_bytes + 1));
   int rv = PKCS5_PBKDF2_HMAC_SHA1(
       password.data(), password.length(),
-      reinterpret_cast<const uint8_t*>(salt.data()), salt.length(), iterations,
-      static_cast<int>(key_size_in_bytes), key_data);
-  return rv == 1 ? key.release() : NULL;
+      reinterpret_cast<const uint8_t*>(salt.data()), salt.length(),
+      static_cast<unsigned>(iterations),
+      key_size_in_bytes, key_data);
+  return rv == 1 ? std::move(key) : nullptr;
 }
 
 // static
-SymmetricKey* SymmetricKey::Import(Algorithm algorithm,
-                                   const std::string& raw_key) {
+std::unique_ptr<SymmetricKey> SymmetricKey::Import(Algorithm algorithm,
+                                                   const std::string& raw_key) {
   if (algorithm == AES) {
     // Whitelist supported key sizes to avoid accidentaly relying on
     // algorithms available in NSS but not BoringSSL and vice
     // versa. Note that BoringSSL does not support AES-192.
     if (raw_key.size() != 128/8 && raw_key.size() != 256/8)
-      return NULL;
+      return nullptr;
   }
 
-  scoped_ptr<SymmetricKey> key(new SymmetricKey);
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
   key->key_ = raw_key;
-  return key.release();
+  return key;
 }
 
 bool SymmetricKey::GetRawKey(std::string* raw_key) {
@@ -102,4 +105,6 @@
   return true;
 }
 
+SymmetricKey::SymmetricKey() = default;
+
 }  // namespace crypto
diff --git a/crypto/symmetric_key.h b/crypto/symmetric_key.h
index 14f74ae..8862708 100644
--- a/crypto/symmetric_key.h
+++ b/crypto/symmetric_key.h
@@ -7,6 +7,7 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <string>
 
 #include "base/macros.h"
@@ -40,26 +41,28 @@
   // Generates a random key suitable to be used with |algorithm| and of
   // |key_size_in_bits| bits. |key_size_in_bits| must be a multiple of 8.
   // The caller is responsible for deleting the returned SymmetricKey.
-  static SymmetricKey* GenerateRandomKey(Algorithm algorithm,
-                                         size_t key_size_in_bits);
+  static std::unique_ptr<SymmetricKey> GenerateRandomKey(
+      Algorithm algorithm,
+      size_t key_size_in_bits);
 
   // Derives a key from the supplied password and salt using PBKDF2, suitable
   // for use with specified |algorithm|. Note |algorithm| is not the algorithm
   // used to derive the key from the password. |key_size_in_bits| must be a
   // multiple of 8. The caller is responsible for deleting the returned
   // SymmetricKey.
-  static SymmetricKey* DeriveKeyFromPassword(Algorithm algorithm,
-                                             const std::string& password,
-                                             const std::string& salt,
-                                             size_t iterations,
-                                             size_t key_size_in_bits);
+  static std::unique_ptr<SymmetricKey> DeriveKeyFromPassword(
+      Algorithm algorithm,
+      const std::string& password,
+      const std::string& salt,
+      size_t iterations,
+      size_t key_size_in_bits);
 
   // Imports an array of key bytes in |raw_key|. This key may have been
   // generated by GenerateRandomKey or DeriveKeyFromPassword and exported with
   // GetRawKey, or via another compatible method. The key must be of suitable
   // size for use with |algorithm|. The caller owns the returned SymmetricKey.
-  static SymmetricKey* Import(Algorithm algorithm, const std::string& raw_key);
-
+  static std::unique_ptr<SymmetricKey> Import(Algorithm algorithm,
+                                              const std::string& raw_key);
 #if defined(NACL_WIN64)
   HCRYPTKEY key() const { return key_.get(); }
 #elif defined(USE_OPENSSL)
diff --git a/crypto/symmetric_key_unittest.cc b/crypto/symmetric_key_unittest.cc
index ef8e7e1..d954761 100644
--- a/crypto/symmetric_key_unittest.cc
+++ b/crypto/symmetric_key_unittest.cc
@@ -4,26 +4,26 @@
 
 #include "crypto/symmetric_key.h"
 
+#include <memory>
 #include <string>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 TEST(SymmetricKeyTest, GenerateRandomKey) {
-  scoped_ptr<crypto::SymmetricKey> key(
+  std::unique_ptr<crypto::SymmetricKey> key(
       crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
-  ASSERT_TRUE(NULL != key.get());
+  ASSERT_TRUE(key);
   std::string raw_key;
   EXPECT_TRUE(key->GetRawKey(&raw_key));
   EXPECT_EQ(32U, raw_key.size());
 
   // Do it again and check that the keys are different.
   // (Note: this has a one-in-10^77 chance of failure!)
-  scoped_ptr<crypto::SymmetricKey> key2(
+  std::unique_ptr<crypto::SymmetricKey> key2(
       crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
-  ASSERT_TRUE(NULL != key2.get());
+  ASSERT_TRUE(key2);
   std::string raw_key2;
   EXPECT_TRUE(key2->GetRawKey(&raw_key2));
   EXPECT_EQ(32U, raw_key2.size());
@@ -31,15 +31,15 @@
 }
 
 TEST(SymmetricKeyTest, ImportGeneratedKey) {
-  scoped_ptr<crypto::SymmetricKey> key1(
+  std::unique_ptr<crypto::SymmetricKey> key1(
       crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
-  ASSERT_TRUE(NULL != key1.get());
+  ASSERT_TRUE(key1);
   std::string raw_key1;
   EXPECT_TRUE(key1->GetRawKey(&raw_key1));
 
-  scoped_ptr<crypto::SymmetricKey> key2(
+  std::unique_ptr<crypto::SymmetricKey> key2(
       crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, raw_key1));
-  ASSERT_TRUE(NULL != key2.get());
+  ASSERT_TRUE(key2);
 
   std::string raw_key2;
   EXPECT_TRUE(key2->GetRawKey(&raw_key2));
@@ -48,16 +48,16 @@
 }
 
 TEST(SymmetricKeyTest, ImportDerivedKey) {
-  scoped_ptr<crypto::SymmetricKey> key1(
+  std::unique_ptr<crypto::SymmetricKey> key1(
       crypto::SymmetricKey::DeriveKeyFromPassword(
           crypto::SymmetricKey::HMAC_SHA1, "password", "somesalt", 1024, 160));
-  ASSERT_TRUE(NULL != key1.get());
+  ASSERT_TRUE(key1);
   std::string raw_key1;
   EXPECT_TRUE(key1->GetRawKey(&raw_key1));
 
-  scoped_ptr<crypto::SymmetricKey> key2(
+  std::unique_ptr<crypto::SymmetricKey> key2(
       crypto::SymmetricKey::Import(crypto::SymmetricKey::HMAC_SHA1, raw_key1));
-  ASSERT_TRUE(NULL != key2.get());
+  ASSERT_TRUE(key2);
 
   std::string raw_key2;
   EXPECT_TRUE(key2->GetRawKey(&raw_key2));
@@ -80,21 +80,11 @@
 
 TEST_P(SymmetricKeyDeriveKeyFromPasswordTest, DeriveKeyFromPassword) {
   PBKDF2TestVector test_data(GetParam());
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The OS X crypto libraries have minimum salt and iteration requirements
-  // so some of the tests below will cause them to barf. Skip these.
-  if (strlen(test_data.salt) < 8 || test_data.rounds < 1000) {
-    VLOG(1) << "Skipped test vector for " << test_data.expected;
-    return;
-  }
-#endif  // OS_MACOSX
-
-  scoped_ptr<crypto::SymmetricKey> key(
+  std::unique_ptr<crypto::SymmetricKey> key(
       crypto::SymmetricKey::DeriveKeyFromPassword(
-          test_data.algorithm,
-          test_data.password, test_data.salt,
+          test_data.algorithm, test_data.password, test_data.salt,
           test_data.rounds, test_data.key_size_in_bits));
-  ASSERT_TRUE(NULL != key.get());
+  ASSERT_TRUE(key);
 
   std::string raw_key;
   key->GetRawKey(&raw_key);
diff --git a/crypto/symmetric_key_win.cc b/crypto/symmetric_key_win.cc
deleted file mode 100644
index ac8e614..0000000
--- a/crypto/symmetric_key_win.cc
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/symmetric_key.h"
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <vector>
-
-// TODO(wtc): replace scoped_array by std::vector.
-#include "base/memory/scoped_ptr.h"
-#include "base/sys_byteorder.h"
-
-namespace crypto {
-
-namespace {
-
-// The following is a non-public Microsoft header documented in MSDN under
-// CryptImportKey / CryptExportKey. Following the header is the byte array of
-// the actual plaintext key.
-struct PlaintextBlobHeader {
-  BLOBHEADER hdr;
-  DWORD cbKeySize;
-};
-
-// CryptoAPI makes use of three distinct ALG_IDs for AES, rather than just
-// CALG_AES (which exists, but depending on the functions you are calling, may
-// result in function failure, whereas the subtype would succeed).
-ALG_ID GetAESAlgIDForKeySize(size_t key_size_in_bits) {
-  // Only AES-128/-192/-256 is supported in CryptoAPI.
-  switch (key_size_in_bits) {
-    case 128:
-      return CALG_AES_128;
-    case 192:
-      return CALG_AES_192;
-    case 256:
-      return CALG_AES_256;
-    default:
-      NOTREACHED();
-      return 0;
-  }
-}
-
-// Imports a raw/plaintext key of |key_size| stored in |*key_data| into a new
-// key created for the specified |provider|. |alg| contains the algorithm of
-// the key being imported.
-// If |key_data| is intended to be used as an HMAC key, then |alg| should be
-// CALG_HMAC.
-// If successful, returns true and stores the imported key in |*key|.
-// TODO(wtc): use this function in hmac_win.cc.
-bool ImportRawKey(HCRYPTPROV provider,
-                  ALG_ID alg,
-                  const void* key_data, size_t key_size,
-                  ScopedHCRYPTKEY* key) {
-  DCHECK_GT(key_size, 0u);
-
-  DWORD actual_size =
-      static_cast<DWORD>(sizeof(PlaintextBlobHeader) + key_size);
-  std::vector<BYTE> tmp_data(actual_size);
-  BYTE* actual_key = &tmp_data[0];
-  memcpy(actual_key + sizeof(PlaintextBlobHeader), key_data, key_size);
-  PlaintextBlobHeader* key_header =
-      reinterpret_cast<PlaintextBlobHeader*>(actual_key);
-  memset(key_header, 0, sizeof(PlaintextBlobHeader));
-
-  key_header->hdr.bType = PLAINTEXTKEYBLOB;
-  key_header->hdr.bVersion = CUR_BLOB_VERSION;
-  key_header->hdr.aiKeyAlg = alg;
-
-  key_header->cbKeySize = static_cast<DWORD>(key_size);
-
-  HCRYPTKEY unsafe_key = NULL;
-  DWORD flags = CRYPT_EXPORTABLE;
-  if (alg == CALG_HMAC) {
-    // Though it may appear odd that IPSEC and RC2 are being used, this is
-    // done in accordance with Microsoft's FIPS 140-2 Security Policy for the
-    // RSA Enhanced Provider, as the approved means of using arbitrary HMAC
-    // key material.
-    key_header->hdr.aiKeyAlg = CALG_RC2;
-    flags |= CRYPT_IPSEC_HMAC_KEY;
-  }
-
-  BOOL ok =
-      CryptImportKey(provider, actual_key, actual_size, 0, flags, &unsafe_key);
-
-  // Clean up the temporary copy of key, regardless of whether it was imported
-  // successfully or not.
-  SecureZeroMemory(actual_key, actual_size);
-
-  if (!ok)
-    return false;
-
-  key->reset(unsafe_key);
-  return true;
-}
-
-// Attempts to generate a random AES key of |key_size_in_bits|. Returns true
-// if generation is successful, storing the generated key in |*key| and the
-// key provider (CSP) in |*provider|.
-bool GenerateAESKey(size_t key_size_in_bits,
-                    ScopedHCRYPTPROV* provider,
-                    ScopedHCRYPTKEY* key) {
-  DCHECK(provider);
-  DCHECK(key);
-
-  ALG_ID alg = GetAESAlgIDForKeySize(key_size_in_bits);
-  if (alg == 0)
-    return false;
-
-  ScopedHCRYPTPROV safe_provider;
-  // Note: The only time NULL is safe to be passed as pszContainer is when
-  // dwFlags contains CRYPT_VERIFYCONTEXT, as all keys generated and/or used
-  // will be treated as ephemeral keys and not persisted.
-  BOOL ok = CryptAcquireContext(safe_provider.receive(), NULL, NULL,
-                                PROV_RSA_AES, CRYPT_VERIFYCONTEXT);
-  if (!ok)
-    return false;
-
-  ScopedHCRYPTKEY safe_key;
-  // In the FIPS 140-2 Security Policy for CAPI on XP/Vista+, Microsoft notes
-  // that CryptGenKey makes use of the same functionality exposed via
-  // CryptGenRandom. The reason this is being used, as opposed to
-  // CryptGenRandom and CryptImportKey is for compliance with the security
-  // policy
-  ok = CryptGenKey(safe_provider.get(), alg, CRYPT_EXPORTABLE,
-                   safe_key.receive());
-  if (!ok)
-    return false;
-
-  key->swap(safe_key);
-  provider->swap(safe_provider);
-
-  return true;
-}
-
-// Returns true if the HMAC key size meets the requirement of FIPS 198
-// Section 3.  |alg| is the hash function used in the HMAC.
-bool CheckHMACKeySize(size_t key_size_in_bits, ALG_ID alg) {
-  DWORD hash_size = 0;
-  switch (alg) {
-    case CALG_SHA1:
-      hash_size = 20;
-      break;
-    case CALG_SHA_256:
-      hash_size = 32;
-      break;
-    case CALG_SHA_384:
-      hash_size = 48;
-      break;
-    case CALG_SHA_512:
-      hash_size = 64;
-      break;
-  }
-  if (hash_size == 0)
-    return false;
-
-  // An HMAC key must be >= L/2, where L is the output size of the hash
-  // function being used.
-  return (key_size_in_bits >= (hash_size / 2 * 8) &&
-         (key_size_in_bits % 8) == 0);
-}
-
-// Attempts to generate a random, |key_size_in_bits|-long HMAC key, for use
-// with the hash function |alg|.
-// |key_size_in_bits| must be >= 1/2 the hash size of |alg| for security.
-// Returns true if generation is successful, storing the generated key in
-// |*key| and the key provider (CSP) in |*provider|.
-bool GenerateHMACKey(size_t key_size_in_bits,
-                     ALG_ID alg,
-                     ScopedHCRYPTPROV* provider,
-                     ScopedHCRYPTKEY* key,
-                     scoped_ptr<BYTE[]>* raw_key) {
-  DCHECK(provider);
-  DCHECK(key);
-  DCHECK(raw_key);
-
-  if (!CheckHMACKeySize(key_size_in_bits, alg))
-    return false;
-
-  ScopedHCRYPTPROV safe_provider;
-  // See comment in GenerateAESKey as to why NULL is acceptable for the
-  // container name.
-  BOOL ok = CryptAcquireContext(safe_provider.receive(), NULL, NULL,
-                                PROV_RSA_FULL, CRYPT_VERIFYCONTEXT);
-  if (!ok)
-    return false;
-
-  DWORD key_size_in_bytes = static_cast<DWORD>(key_size_in_bits / 8);
-  scoped_ptr<BYTE[]> random(new BYTE[key_size_in_bytes]);
-  ok = CryptGenRandom(safe_provider, key_size_in_bytes, random.get());
-  if (!ok)
-    return false;
-
-  ScopedHCRYPTKEY safe_key;
-  bool rv = ImportRawKey(safe_provider, CALG_HMAC, random.get(),
-                         key_size_in_bytes, &safe_key);
-  if (rv) {
-    key->swap(safe_key);
-    provider->swap(safe_provider);
-    raw_key->swap(random);
-  }
-
-  SecureZeroMemory(random.get(), key_size_in_bytes);
-  return rv;
-}
-
-// Attempts to create an HMAC hash instance using the specified |provider|
-// and |key|. The inner hash function will be |hash_alg|. If successful,
-// returns true and stores the hash in |*hash|.
-// TODO(wtc): use this function in hmac_win.cc.
-bool CreateHMACHash(HCRYPTPROV provider,
-                    HCRYPTKEY key,
-                    ALG_ID hash_alg,
-                    ScopedHCRYPTHASH* hash) {
-  ScopedHCRYPTHASH safe_hash;
-  BOOL ok = CryptCreateHash(provider, CALG_HMAC, key, 0, safe_hash.receive());
-  if (!ok)
-    return false;
-
-  HMAC_INFO hmac_info;
-  memset(&hmac_info, 0, sizeof(hmac_info));
-  hmac_info.HashAlgid = hash_alg;
-
-  ok = CryptSetHashParam(safe_hash, HP_HMAC_INFO,
-                         reinterpret_cast<const BYTE*>(&hmac_info), 0);
-  if (!ok)
-    return false;
-
-  hash->swap(safe_hash);
-  return true;
-}
-
-// Computes a block of the derived key using the PBKDF2 function F for the
-// specified |block_index| using the PRF |hash|, writing the output to
-// |output_buf|.
-// |output_buf| must have enough space to accomodate the output of the PRF
-// specified by |hash|.
-// Returns true if the block was successfully computed.
-bool ComputePBKDF2Block(HCRYPTHASH hash,
-                        DWORD hash_size,
-                        const std::string& salt,
-                        size_t iterations,
-                        uint32_t block_index,
-                        BYTE* output_buf) {
-  // From RFC 2898:
-  // 3. <snip> The function F is defined as the exclusive-or sum of the first
-  //    c iterates of the underlying pseudorandom function PRF applied to the
-  //    password P and the concatenation of the salt S and the block index i:
-  //      F (P, S, c, i) = U_1 \xor U_2 \xor ... \xor U_c
-  //    where
-  //      U_1 = PRF(P, S || INT (i))
-  //      U_2 = PRF(P, U_1)
-  //      ...
-  //      U_c = PRF(P, U_{c-1})
-  ScopedHCRYPTHASH safe_hash;
-  BOOL ok = CryptDuplicateHash(hash, NULL, 0, safe_hash.receive());
-  if (!ok)
-    return false;
-
-  // Iteration U_1: Compute PRF for S.
-  ok = CryptHashData(safe_hash, reinterpret_cast<const BYTE*>(salt.data()),
-                     static_cast<DWORD>(salt.size()), 0);
-  if (!ok)
-    return false;
-
-  // Iteration U_1: and append (big-endian) INT (i).
-  uint32_t big_endian_block_index = base::HostToNet32(block_index);
-  ok = CryptHashData(safe_hash,
-                     reinterpret_cast<BYTE*>(&big_endian_block_index),
-                     sizeof(big_endian_block_index), 0);
-
-  std::vector<BYTE> hash_value(hash_size);
-
-  DWORD size = hash_size;
-  ok = CryptGetHashParam(safe_hash, HP_HASHVAL, &hash_value[0], &size, 0);
-  if (!ok  || size != hash_size)
-    return false;
-
-  memcpy(output_buf, &hash_value[0], hash_size);
-
-  // Iteration 2 - c: Compute U_{iteration} by applying the PRF to
-  // U_{iteration - 1}, then xor the resultant hash with |output|, which
-  // contains U_1 ^ U_2 ^ ... ^ U_{iteration - 1}.
-  for (size_t iteration = 2; iteration <= iterations; ++iteration) {
-    safe_hash.reset();
-    ok = CryptDuplicateHash(hash, NULL, 0, safe_hash.receive());
-    if (!ok)
-      return false;
-
-    ok = CryptHashData(safe_hash, &hash_value[0], hash_size, 0);
-    if (!ok)
-      return false;
-
-    size = hash_size;
-    ok = CryptGetHashParam(safe_hash, HP_HASHVAL, &hash_value[0], &size, 0);
-    if (!ok || size != hash_size)
-      return false;
-
-    for (DWORD i = 0; i < hash_size; ++i)
-      output_buf[i] ^= hash_value[i];
-  }
-
-  return true;
-}
-
-}  // namespace
-
-SymmetricKey::~SymmetricKey() {
-  // TODO(wtc): create a "secure" string type that zeroes itself in the
-  // destructor.
-  if (!raw_key_.empty())
-    SecureZeroMemory(const_cast<char *>(raw_key_.data()), raw_key_.size());
-}
-
-// static
-SymmetricKey* SymmetricKey::GenerateRandomKey(Algorithm algorithm,
-                                              size_t key_size_in_bits) {
-  DCHECK_GE(key_size_in_bits, 8u);
-
-  ScopedHCRYPTPROV provider;
-  ScopedHCRYPTKEY key;
-
-  bool ok = false;
-  scoped_ptr<BYTE[]> raw_key;
-
-  switch (algorithm) {
-    case AES:
-      ok = GenerateAESKey(key_size_in_bits, &provider, &key);
-      break;
-    case HMAC_SHA1:
-      ok = GenerateHMACKey(key_size_in_bits, CALG_SHA1, &provider,
-                           &key, &raw_key);
-      break;
-  }
-
-  if (!ok) {
-    NOTREACHED();
-    return NULL;
-  }
-
-  size_t key_size_in_bytes = key_size_in_bits / 8;
-  if (raw_key == NULL)
-    key_size_in_bytes = 0;
-
-  SymmetricKey* result = new SymmetricKey(provider.release(),
-                                          key.release(),
-                                          raw_key.get(),
-                                          key_size_in_bytes);
-  if (raw_key != NULL)
-    SecureZeroMemory(raw_key.get(), key_size_in_bytes);
-
-  return result;
-}
-
-// static
-SymmetricKey* SymmetricKey::DeriveKeyFromPassword(Algorithm algorithm,
-                                                  const std::string& password,
-                                                  const std::string& salt,
-                                                  size_t iterations,
-                                                  size_t key_size_in_bits) {
-  // CryptoAPI lacks routines to perform PBKDF2 derivation as specified
-  // in RFC 2898, so it must be manually implemented. Only HMAC-SHA1 is
-  // supported as the PRF.
-
-  // While not used until the end, sanity-check the input before proceeding
-  // with the expensive computation.
-  DWORD provider_type = 0;
-  ALG_ID alg = 0;
-  switch (algorithm) {
-    case AES:
-      provider_type = PROV_RSA_AES;
-      alg = GetAESAlgIDForKeySize(key_size_in_bits);
-      break;
-    case HMAC_SHA1:
-      provider_type = PROV_RSA_FULL;
-      alg = CALG_HMAC;
-      break;
-    default:
-      NOTREACHED();
-      break;
-  }
-  if (provider_type == 0 || alg == 0)
-    return NULL;
-
-  ScopedHCRYPTPROV provider;
-  BOOL ok = CryptAcquireContext(provider.receive(), NULL, NULL, provider_type,
-                                CRYPT_VERIFYCONTEXT);
-  if (!ok)
-    return NULL;
-
-  // Convert the user password into a key suitable to be fed into the PRF
-  // function.
-  ScopedHCRYPTKEY password_as_key;
-  BYTE* password_as_bytes =
-      const_cast<BYTE*>(reinterpret_cast<const BYTE*>(password.data()));
-  if (!ImportRawKey(provider, CALG_HMAC, password_as_bytes,
-                    password.size(), &password_as_key))
-    return NULL;
-
-  // Configure the PRF function. Only HMAC variants are supported, with the
-  // only hash function supported being SHA1.
-  // TODO(rsleevi): Support SHA-256 on XP SP3+.
-  ScopedHCRYPTHASH prf;
-  if (!CreateHMACHash(provider, password_as_key, CALG_SHA1, &prf))
-    return NULL;
-
-  DWORD hLen = 0;
-  DWORD param_size = sizeof(hLen);
-  ok = CryptGetHashParam(prf, HP_HASHSIZE,
-                         reinterpret_cast<BYTE*>(&hLen), &param_size, 0);
-  if (!ok || hLen == 0)
-    return NULL;
-
-  // 1. If dkLen > (2^32 - 1) * hLen, output "derived key too long" and stop.
-  size_t dkLen = key_size_in_bits / 8;
-  DCHECK_GT(dkLen, 0u);
-
-  if ((dkLen / hLen) > 0xFFFFFFFF) {
-    DLOG(ERROR) << "Derived key too long.";
-    return NULL;
-  }
-
-  // 2. Let l be the number of hLen-octet blocks in the derived key,
-  //    rounding up, and let r be the number of octets in the last
-  //    block:
-  size_t L = (dkLen + hLen - 1) / hLen;
-  DCHECK_GT(L, 0u);
-
-  size_t total_generated_size = L * hLen;
-  std::vector<BYTE> generated_key(total_generated_size);
-  BYTE* block_offset = &generated_key[0];
-
-  // 3. For each block of the derived key apply the function F defined below
-  //    to the password P, the salt S, the iteration count c, and the block
-  //    index to compute the block:
-  //    T_1 = F (P, S, c, 1)
-  //    T_2 = F (P, S, c, 2)
-  //    ...
-  //    T_l = F (P, S, c, l)
-  // <snip>
-  // 4. Concatenate the blocks and extract the first dkLen octets to produce
-  //    a derived key DK:
-  //    DK = T_1 || T_2 || ... || T_l<0..r-1>
-  for (uint32_t block_index = 1; block_index <= L; ++block_index) {
-    if (!ComputePBKDF2Block(prf, hLen, salt, iterations, block_index,
-                            block_offset))
-        return NULL;
-    block_offset += hLen;
-  }
-
-  // Convert the derived key bytes into a key handle for the desired algorithm.
-  ScopedHCRYPTKEY key;
-  if (!ImportRawKey(provider, alg, &generated_key[0], dkLen, &key))
-    return NULL;
-
-  SymmetricKey* result = new SymmetricKey(provider.release(), key.release(),
-                                          &generated_key[0], dkLen);
-
-  SecureZeroMemory(&generated_key[0], total_generated_size);
-
-  return result;
-}
-
-// static
-SymmetricKey* SymmetricKey::Import(Algorithm algorithm,
-                                   const std::string& raw_key) {
-  DWORD provider_type = 0;
-  ALG_ID alg = 0;
-  switch (algorithm) {
-    case AES:
-      provider_type = PROV_RSA_AES;
-      alg = GetAESAlgIDForKeySize(raw_key.size() * 8);
-      break;
-    case HMAC_SHA1:
-      provider_type = PROV_RSA_FULL;
-      alg = CALG_HMAC;
-      break;
-    default:
-      NOTREACHED();
-      break;
-  }
-  if (provider_type == 0 || alg == 0)
-    return NULL;
-
-  ScopedHCRYPTPROV provider;
-  BOOL ok = CryptAcquireContext(provider.receive(), NULL, NULL, provider_type,
-                                CRYPT_VERIFYCONTEXT);
-  if (!ok)
-    return NULL;
-
-  ScopedHCRYPTKEY key;
-  if (!ImportRawKey(provider, alg, raw_key.data(), raw_key.size(), &key))
-    return NULL;
-
-  return new SymmetricKey(provider.release(), key.release(),
-                          raw_key.data(), raw_key.size());
-}
-
-bool SymmetricKey::GetRawKey(std::string* raw_key) {
-  // Short circuit for when the key was supplied to the constructor.
-  if (!raw_key_.empty()) {
-    *raw_key = raw_key_;
-    return true;
-  }
-
-  DWORD size = 0;
-  BOOL ok = CryptExportKey(key_, 0, PLAINTEXTKEYBLOB, 0, NULL, &size);
-  if (!ok)
-    return false;
-
-  std::vector<BYTE> result(size);
-
-  ok = CryptExportKey(key_, 0, PLAINTEXTKEYBLOB, 0, &result[0], &size);
-  if (!ok)
-    return false;
-
-  PlaintextBlobHeader* header =
-      reinterpret_cast<PlaintextBlobHeader*>(&result[0]);
-  raw_key->assign(reinterpret_cast<char*>(&result[sizeof(*header)]),
-                  header->cbKeySize);
-
-  SecureZeroMemory(&result[0], size);
-
-  return true;
-}
-
-SymmetricKey::SymmetricKey(HCRYPTPROV provider,
-                           HCRYPTKEY key,
-                           const void* key_data, size_t key_size_in_bytes)
-    : provider_(provider), key_(key) {
-  if (key_data) {
-    raw_key_.assign(reinterpret_cast<const char*>(key_data),
-                    key_size_in_bytes);
-  }
-}
-
-}  // namespace crypto
diff --git a/crypto/third_party/nss/LICENSE b/crypto/third_party/nss/LICENSE
deleted file mode 100644
index 0367164..0000000
--- a/crypto/third_party/nss/LICENSE
+++ /dev/null
@@ -1,35 +0,0 @@
-/* ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1994-2000
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
diff --git a/crypto/third_party/nss/README.chromium b/crypto/third_party/nss/README.chromium
deleted file mode 100644
index 1a63665..0000000
--- a/crypto/third_party/nss/README.chromium
+++ /dev/null
@@ -1,18 +0,0 @@
-Name: Network Security Services (NSS)
-URL: http://www.mozilla.org/projects/security/pki/nss/
-License: MPL 1.1/GPL 2.0/LGPL 2.1
-
-We extracted the SHA-256 source files, eliminated unneeded dependencies,
-deleted or commented out unused code, and tweaked them for Chrome's source
-tree.  sha512.c is renamed sha512.cc so that it can include Chrome's C++
-header "base/basictypes.h".  We define NOUNROLL256 to reduce the object code
-size.
-
-In blapi.h and sha512.cc, replaced uint32 by unsigned int so that they can
-be compiled with -DNO_NSPR_10_SUPPORT.  NO_NSPR_10_SUPPORT turns off the
-definition of the NSPR 1.0 types int8 - int64 and uint8 - uint64 to avoid
-conflict with the same-named types defined in "base/basictypes.h".
-
-rsawrapr.c is copied from nss/lib/softoken/rsawrapr.c, with
-HASH_GetRawHashObject changed to HASH_GetHashObject. It contains the
-emsa_pss_verify function for verifying RSA-PSS signatures.
diff --git a/crypto/third_party/nss/pk11akey.cc b/crypto/third_party/nss/pk11akey.cc
deleted file mode 100644
index 4db582f..0000000
--- a/crypto/third_party/nss/pk11akey.cc
+++ /dev/null
@@ -1,98 +0,0 @@
- /* ***** BEGIN LICENSE BLOCK *****
-  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
-  *
-  * The contents of this file are subject to the Mozilla Public License Version
-  * 1.1 (the "License"); you may not use this file except in compliance with
-  * the License. You may obtain a copy of the License at
-  * http://www.mozilla.org/MPL/
-  *
-  * Software distributed under the License is distributed on an "AS IS" basis,
-  * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-  * for the specific language governing rights and limitations under the
-  * License.
-  *
-  * The Original Code is the Netscape security libraries.
-  *
-  * The Initial Developer of the Original Code is
-  * Netscape Communications Corporation.
-  * Portions created by the Initial Developer are Copyright (C) 1994-2000
-  * the Initial Developer. All Rights Reserved.
-  *
-  * Contributor(s):
-  *   Dr Stephen Henson <stephen.henson@gemplus.com>
-  *   Dr Vipul Gupta <vipul.gupta@sun.com>, and
-  *   Douglas Stebila <douglas@stebila.ca>, Sun Microsystems Laboratories
-  *
-  * Alternatively, the contents of this file may be used under the terms of
-  * either the GNU General Public License Version 2 or later (the "GPL"), or
-  * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-  * in which case the provisions of the GPL or the LGPL are applicable instead
-  * of those above. If you wish to allow use of your version of this file only
-  * under the terms of either the GPL or the LGPL, and not to allow others to
-  * use your version of this file under the terms of the MPL, indicate your
-  * decision by deleting the provisions above and replace them with the notice
-  * and other provisions required by the GPL or the LGPL. If you do not delete
-  * the provisions above, a recipient may use your version of this file under
-  * the terms of any one of the MPL, the GPL or the LGPL.
-  *
-  * ***** END LICENSE BLOCK ***** */
-
-#include "crypto/third_party/nss/chromium-nss.h"
-
-#include <pk11pub.h>
-
-#include "base/logging.h"
-
-// Based on PK11_ImportEncryptedPrivateKeyInfo function in
-// mozilla/security/nss/lib/pk11wrap/pk11akey.c.
-SECStatus ImportEncryptedECPrivateKeyInfoAndReturnKey(
-    PK11SlotInfo* slot,
-    SECKEYEncryptedPrivateKeyInfo* epki,
-    SECItem* password,
-    SECItem* nickname,
-    SECItem* public_value,
-    PRBool permanent,
-    PRBool sensitive,
-    SECKEYPrivateKey** private_key,
-    void* wincx) {
-  SECItem* crypto_param = NULL;
-
-  CK_ATTRIBUTE_TYPE usage = CKA_SIGN;
-
-  PK11SymKey* key = PK11_PBEKeyGen(slot,
-                                   &epki->algorithm,
-                                   password,
-                                   PR_FALSE,  // faulty3DES
-                                   wincx);
-  if (key == NULL) {
-    DLOG(ERROR) << "PK11_PBEKeyGen: " << PORT_GetError();
-    return SECFailure;
-  }
-
-  CK_MECHANISM_TYPE crypto_mech_type = PK11_GetPBECryptoMechanism(
-      &epki->algorithm, &crypto_param, password);
-  if (crypto_mech_type == CKM_INVALID_MECHANISM) {
-    DLOG(ERROR) << "PK11_GetPBECryptoMechanism: " << PORT_GetError();
-    PK11_FreeSymKey(key);
-    return SECFailure;
-  }
-
-  crypto_mech_type = PK11_GetPadMechanism(crypto_mech_type);
-
-  *private_key = PK11_UnwrapPrivKey(slot, key, crypto_mech_type, crypto_param,
-                                    &epki->encryptedData, nickname,
-                                    public_value, permanent, sensitive, CKK_EC,
-                                    &usage, 1, wincx);
-
-  if (crypto_param != NULL)
-    SECITEM_ZfreeItem(crypto_param, PR_TRUE);
-
-  PK11_FreeSymKey(key);
-
-  if (!*private_key) {
-    DLOG(ERROR) << "PK11_UnwrapPrivKey: " << PORT_GetError();
-    return SECFailure;
-  }
-
-  return SECSuccess;
-}
diff --git a/crypto/third_party/nss/secsign.cc b/crypto/third_party/nss/secsign.cc
deleted file mode 100644
index c9816fb..0000000
--- a/crypto/third_party/nss/secsign.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Signature stuff.
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1994-2000
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *   Dr Vipul Gupta <vipul.gupta@sun.com>, Sun Microsystems Laboratories
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-#include "crypto/third_party/nss/chromium-nss.h"
-
-#include <vector>
-
-#include <cryptohi.h>
-#include <pk11pub.h>
-#include <secerr.h>
-#include <sechash.h>
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "build/build_config.h"
-
-SECStatus DerSignData(PLArenaPool *arena,
-                      SECItem *result,
-                      SECItem *input,
-                      SECKEYPrivateKey *key,
-                      SECOidTag algo_id) {
-  if (key->keyType != ecKey) {
-    return SEC_DerSignData(arena, result, input->data, input->len, key,
-                           algo_id);
-  }
-
-  // NSS has a private function sec_DecodeSigAlg it uses to figure out the
-  // correct hash from the algorithm id.
-  HASH_HashType hash_type;
-  switch (algo_id) {
-    case SEC_OID_ANSIX962_ECDSA_SHA1_SIGNATURE:
-      hash_type = HASH_AlgSHA1;
-      break;
-#ifdef SHA224_LENGTH
-    case SEC_OID_ANSIX962_ECDSA_SHA224_SIGNATURE:
-      hash_type = HASH_AlgSHA224;
-      break;
-#endif
-    case SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE:
-      hash_type = HASH_AlgSHA256;
-      break;
-    case SEC_OID_ANSIX962_ECDSA_SHA384_SIGNATURE:
-      hash_type = HASH_AlgSHA384;
-      break;
-    case SEC_OID_ANSIX962_ECDSA_SHA512_SIGNATURE:
-      hash_type = HASH_AlgSHA512;
-      break;
-    default:
-      PORT_SetError(SEC_ERROR_INVALID_ALGORITHM);
-      return SECFailure;
-  }
-
-  // Hash the input.
-  std::vector<uint8_t> hash_data(HASH_ResultLen(hash_type));
-  SECStatus rv = HASH_HashBuf(
-      hash_type, &hash_data[0], input->data, input->len);
-  if (rv != SECSuccess)
-    return rv;
-  SECItem hash = {siBuffer, &hash_data[0], 
-		  static_cast<unsigned int>(hash_data.size())};
-
-  // Compute signature of hash.
-  int signature_len = PK11_SignatureLen(key);
-  std::vector<uint8_t> signature_data(signature_len);
-  SECItem sig = {siBuffer, &signature_data[0], 
-		 static_cast<unsigned int>(signature_len)};
-  rv = PK11_Sign(key, &sig, &hash);
-  if (rv != SECSuccess)
-    return rv;
-
-  CERTSignedData sd;
-  PORT_Memset(&sd, 0, sizeof(sd));
-  // Fill in tbsCertificate.
-  sd.data.data = (unsigned char*) input->data;
-  sd.data.len = input->len;
-
-  // Fill in signatureAlgorithm.
-  rv = SECOID_SetAlgorithmID(arena, &sd.signatureAlgorithm, algo_id, 0);
-  if (rv != SECSuccess)
-    return rv;
-
-  // Fill in signatureValue.
-  rv = DSAU_EncodeDerSigWithLen(&sd.signature, &sig, sig.len);
-  if (rv != SECSuccess)
-    return rv;
-  sd.signature.len <<=  3;  // Convert to bit string.
-
-  // DER encode the signed data object.
-  void* encode_result = SEC_ASN1EncodeItem(
-      arena, result, &sd, SEC_ASN1_GET(CERT_SignedDataTemplate));
-
-  PORT_Free(sd.signature.data);
-
-  return encode_result ? SECSuccess : SECFailure;
-}
diff --git a/crypto/third_party/nss/sha512.cc b/crypto/third_party/nss/sha512.cc
index 5ef4e50..78950cb 100644
--- a/crypto/third_party/nss/sha512.cc
+++ b/crypto/third_party/nss/sha512.cc
@@ -471,6 +471,11 @@
 	*digestLen = padLen;
 }
 
+void SHA256_Clone(SHA256Context* dest, SHA256Context* src)
+{
+  memcpy(dest, src, sizeof *dest);
+}
+
 /* Comment out unused code, mostly the SHA384 and SHA512 implementations. */
 #if 0
 SECStatus
@@ -519,12 +524,6 @@
     return ctx;
 }
 
-void SHA256_Clone(SHA256Context *dest, SHA256Context *src)
-{
-    memcpy(dest, src, sizeof *dest);
-}
-
-
 /* ======= SHA512 and SHA384 common constants and defines ================= */
 
 /* common #defines for SHA512 and SHA384 */
diff --git a/crypto/wincrypt_shim.h b/crypto/wincrypt_shim.h
index 799ac49..48d4b5c 100644
--- a/crypto/wincrypt_shim.h
+++ b/crypto/wincrypt_shim.h
@@ -22,4 +22,4 @@
 #define WINCRYPT_X509_EXTENSIONS ((LPCSTR) 5)
 #define WINCRYPT_X509_NAME ((LPCSTR) 7)
 
-#endif  // NET_CRYPTO_WINCRYPT_SHIM_H_
\ No newline at end of file
+#endif  // NET_CRYPTO_WINCRYPT_SHIM_H_
diff --git a/dbus/BUILD.gn b/dbus/BUILD.gn
index dbeee0c..28efb93 100644
--- a/dbus/BUILD.gn
+++ b/dbus/BUILD.gn
@@ -2,9 +2,12 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import("//build/config/features.gni")
 import("//testing/test.gni")
 import("//third_party/protobuf/proto_library.gni")
 
+assert(use_dbus)
+
 component("dbus") {
   sources = [
     "bus.cc",
@@ -45,7 +48,7 @@
     "//base",
   ]
 
-  public_configs = [ "//build/config/linux:dbus" ]
+  public_configs = [ "//build/config/linux/dbus" ]
 }
 
 proto_library("test_proto") {
@@ -56,7 +59,7 @@
 
 # This target contains mocks that can be used to write unit tests without
 # issuing actual D-Bus calls.
-source_set("test_support") {
+static_library("test_support") {
   testonly = true
   sources = [
     "mock_bus.cc",
@@ -76,7 +79,7 @@
     "//testing/gmock",
   ]
 
-  configs += [ "//build/config/linux:dbus" ]
+  configs += [ "//build/config/linux/dbus" ]
 }
 
 test("dbus_unittests") {
@@ -109,7 +112,7 @@
     "//third_party/protobuf:protobuf_lite",
   ]
 
-  configs += [ "//build/config/linux:dbus" ]
+  configs += [ "//build/config/linux/dbus" ]
 }
 
 executable("dbus_test_server") {
@@ -127,5 +130,5 @@
     "//build/config/sanitizers:deps",
   ]
 
-  configs += [ "//build/config/linux:dbus" ]
+  configs += [ "//build/config/linux/dbus" ]
 }
diff --git a/dbus/OWNERS b/dbus/OWNERS
index fc425e6..04931c3 100644
--- a/dbus/OWNERS
+++ b/dbus/OWNERS
@@ -1,4 +1,3 @@
 hashimoto@chromium.org
-keybuk@chromium.org
 satorux@chromium.org
 stevenjb@chromium.org
diff --git a/dbus/bus.cc b/dbus/bus.cc
index 8781eae..57834d3 100644
--- a/dbus/bus.cc
+++ b/dbus/bus.cc
@@ -13,6 +13,7 @@
 #include "base/strings/stringprintf.h"
 #include "base/threading/thread.h"
 #include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
 #include "dbus/exported_object.h"
 #include "dbus/message.h"
@@ -78,13 +79,13 @@
 
  private:
   // Implement MessagePumpLibevent::Watcher.
-  void OnFileCanReadWithoutBlocking(int /* file_descriptor */) override {
+  void OnFileCanReadWithoutBlocking(int /*file_descriptor*/) override {
     const bool success = dbus_watch_handle(raw_watch_, DBUS_WATCH_READABLE);
     CHECK(success) << "Unable to allocate memory";
   }
 
   // Implement MessagePumpLibevent::Watcher.
-  void OnFileCanWriteWithoutBlocking(int /* file_descriptor */) override {
+  void OnFileCanWriteWithoutBlocking(int /*file_descriptor*/) override {
     const bool success = dbus_watch_handle(raw_watch_, DBUS_WATCH_WRITABLE);
     CHECK(success) << "Unable to allocate memory";
   }
@@ -184,7 +185,8 @@
     : bus_type_(options.bus_type),
       connection_type_(options.connection_type),
       dbus_task_runner_(options.dbus_task_runner),
-      on_shutdown_(false /* manual_reset */, false /* initially_signaled */),
+      on_shutdown_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
+                   base::WaitableEvent::InitialState::NOT_SIGNALED),
       connection_(NULL),
       origin_thread_id_(base::PlatformThread::CurrentId()),
       async_operations_set_up_(false),
@@ -196,8 +198,8 @@
   dbus_threads_init_default();
   // The origin message loop is unnecessary if the client uses synchronous
   // functions only.
-  if (base::MessageLoop::current())
-    origin_task_runner_ = base::MessageLoop::current()->task_runner();
+  if (base::ThreadTaskRunnerHandle::IsSet())
+    origin_task_runner_ = base::ThreadTaskRunnerHandle::Get();
 }
 
 Bus::~Bus() {
@@ -878,7 +880,8 @@
     return "";
   }
 
-  scoped_ptr<Response> response(Response::FromRawMessage(response_message));
+  std::unique_ptr<Response> response(
+      Response::FromRawMessage(response_message));
   MessageReader reader(response.get());
 
   std::string service_owner;
@@ -1081,7 +1084,7 @@
 }
 
 void Bus::OnDispatchStatusChanged(DBusConnection* connection,
-                                  DBusDispatchStatus /* status */) {
+                                  DBusDispatchStatus /*status*/) {
   DCHECK_EQ(connection, connection_);
   AssertOnDBusThread();
 
@@ -1101,7 +1104,7 @@
   // |message| will be unrefed on exit of the function. Increment the
   // reference so we can use it in Signal::FromRawMessage() below.
   dbus_message_ref(message);
-  scoped_ptr<Signal> signal(Signal::FromRawMessage(message));
+  std::unique_ptr<Signal> signal(Signal::FromRawMessage(message));
 
   // Confirm the validity of the NameOwnerChanged signal.
   if (signal->GetMember() != kNameOwnerChangedSignal ||
@@ -1178,7 +1181,7 @@
 
 // static
 DBusHandlerResult Bus::OnServiceOwnerChangedFilter(
-    DBusConnection* /* connection */,
+    DBusConnection* /*connection*/,
     DBusMessage* message,
     void* data) {
   if (dbus_message_is_signal(message,
diff --git a/dbus/bus.h b/dbus/bus.h
index e5e0b1c..7d39159 100644
--- a/dbus/bus.h
+++ b/dbus/bus.h
@@ -88,7 +88,7 @@
 //       bus.GetObjectProxy(service_name, object_path);
 //
 //   dbus::MethodCall method_call(interface_name, method_name);
-//   scoped_ptr<dbus::Response> response(
+//   std::unique_ptr<dbus::Response> response(
 //       object_proxy.CallMethodAndBlock(&method_call, timeout_ms));
 //   if (response.get() != NULL) {  // Success.
 //     ...
diff --git a/dbus/dbus_statistics.cc b/dbus/dbus_statistics.cc
index e6eb5a2..e1e0973 100644
--- a/dbus/dbus_statistics.cc
+++ b/dbus/dbus_statistics.cc
@@ -4,11 +4,11 @@
 
 #include "dbus/dbus_statistics.h"
 
+#include <memory>
 #include <set>
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/threading/platform_thread.h"
@@ -108,7 +108,7 @@
                 const std::string& method,
                 bool add_stat) {
     DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
-    scoped_ptr<Stat> stat(new Stat(service, interface, method));
+    std::unique_ptr<Stat> stat(new Stat(service, interface, method));
     StatSet::iterator found = stats_.find(stat.get());
     if (found != stats_.end())
       return *found;
diff --git a/dbus/exported_object.cc b/dbus/exported_object.cc
index 889792a..b156308 100644
--- a/dbus/exported_object.cc
+++ b/dbus/exported_object.cc
@@ -169,8 +169,7 @@
 
   ScopedDBusError error;
 
-  DBusObjectPathVTable vtable;
-  memset(&vtable, 0, sizeof(vtable));
+  DBusObjectPathVTable vtable = {};
   vtable.message_function = &ExportedObject::HandleMessageThunk;
   vtable.unregister_function = &ExportedObject::OnUnregisteredThunk;
   const bool success = bus_->TryRegisterObjectPath(object_path_,
@@ -187,16 +186,15 @@
   return true;
 }
 
-DBusHandlerResult ExportedObject::HandleMessage(
-    DBusConnection* /* connection */,
-    DBusMessage* raw_message) {
+DBusHandlerResult ExportedObject::HandleMessage(DBusConnection*,
+                                                DBusMessage* raw_message) {
   bus_->AssertOnDBusThread();
   DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_CALL, dbus_message_get_type(raw_message));
 
   // raw_message will be unrefed on exit of the function. Increment the
   // reference so we can use it in MethodCall.
   dbus_message_ref(raw_message);
-  scoped_ptr<MethodCall> method_call(
+  std::unique_ptr<MethodCall> method_call(
       MethodCall::FromRawMessage(raw_message));
   const std::string interface = method_call->GetInterface();
   const std::string member = method_call->GetMember();
@@ -242,7 +240,7 @@
 }
 
 void ExportedObject::RunMethod(MethodCallCallback method_call_callback,
-                               scoped_ptr<MethodCall> method_call,
+                               std::unique_ptr<MethodCall> method_call,
                                base::TimeTicks start_time) {
   bus_->AssertOnOriginThread();
   MethodCall* method = method_call.get();
@@ -254,8 +252,8 @@
 }
 
 void ExportedObject::SendResponse(base::TimeTicks start_time,
-                                  scoped_ptr<MethodCall> method_call,
-                                  scoped_ptr<Response> response) {
+                                  std::unique_ptr<MethodCall> method_call,
+                                  std::unique_ptr<Response> response) {
   DCHECK(method_call);
   if (bus_->HasDBusThread()) {
     bus_->GetDBusTaskRunner()->PostTask(
@@ -270,8 +268,8 @@
   }
 }
 
-void ExportedObject::OnMethodCompleted(scoped_ptr<MethodCall> method_call,
-                                       scoped_ptr<Response> response,
+void ExportedObject::OnMethodCompleted(std::unique_ptr<MethodCall> method_call,
+                                       std::unique_ptr<Response> response,
                                        base::TimeTicks start_time) {
   bus_->AssertOnDBusThread();
 
@@ -287,11 +285,9 @@
 
   if (!response) {
     // Something bad happened in the method call.
-    scoped_ptr<ErrorResponse> error_response(
-        ErrorResponse::FromMethodCall(
-            method_call.get(),
-            DBUS_ERROR_FAILED,
-            "error occurred in " + method_call->GetMember()));
+    std::unique_ptr<ErrorResponse> error_response(ErrorResponse::FromMethodCall(
+        method_call.get(), DBUS_ERROR_FAILED,
+        "error occurred in " + method_call->GetMember()));
     bus_->Send(error_response->raw_message(), NULL);
     return;
   }
@@ -304,8 +300,7 @@
                       base::TimeTicks::Now() - start_time);
 }
 
-void ExportedObject::OnUnregistered(DBusConnection* /* connection */) {
-}
+void ExportedObject::OnUnregistered(DBusConnection*) {}
 
 DBusHandlerResult ExportedObject::HandleMessageThunk(
     DBusConnection* connection,
diff --git a/dbus/exported_object.h b/dbus/exported_object.h
index 89de096..69a63a5 100644
--- a/dbus/exported_object.h
+++ b/dbus/exported_object.h
@@ -8,6 +8,7 @@
 #include <dbus/dbus.h>
 
 #include <map>
+#include <memory>
 #include <string>
 #include <utility>
 
@@ -41,7 +42,8 @@
   // Called to send a response from an exported method. |response| is the
   // response message. Callers should pass NULL in the event of an error that
   // prevents the sending of a response.
-  typedef base::Callback<void (scoped_ptr<Response> response)> ResponseSender;
+  typedef base::Callback<void(std::unique_ptr<Response> response)>
+      ResponseSender;
 
   // Called when an exported method is called. |method_call| is the request
   // message. |sender| is the callback that's used to send a response.
@@ -138,20 +140,20 @@
 
   // Runs the method. Helper function for HandleMessage().
   void RunMethod(MethodCallCallback method_call_callback,
-                 scoped_ptr<MethodCall> method_call,
+                 std::unique_ptr<MethodCall> method_call,
                  base::TimeTicks start_time);
 
   // Callback invoked by service provider to send a response to a method call.
   // Can be called immediately from a MethodCallCallback to implement a
   // synchronous service or called later to implement an asynchronous service.
   void SendResponse(base::TimeTicks start_time,
-                    scoped_ptr<MethodCall> method_call,
-                    scoped_ptr<Response> response);
+                    std::unique_ptr<MethodCall> method_call,
+                    std::unique_ptr<Response> response);
 
   // Called on completion of the method run from SendResponse().
   // Takes ownership of |method_call| and |response|.
-  void OnMethodCompleted(scoped_ptr<MethodCall> method_call,
-                         scoped_ptr<Response> response,
+  void OnMethodCompleted(std::unique_ptr<MethodCall> method_call,
+                         std::unique_ptr<Response> response,
                          base::TimeTicks start_time);
 
   // Called when the object is unregistered.
diff --git a/dbus/file_descriptor.h b/dbus/file_descriptor.h
index b4f95cb..f8e8677 100644
--- a/dbus/file_descriptor.h
+++ b/dbus/file_descriptor.h
@@ -5,8 +5,9 @@
 #ifndef DBUS_FILE_DESCRIPTOR_H_
 #define DBUS_FILE_DESCRIPTOR_H_
 
-#include "base/memory/scoped_ptr.h"
-#include "base/move.h"
+#include <memory>
+
+#include "base/macros.h"
 #include "dbus/dbus_export.h"
 
 namespace dbus {
@@ -33,8 +34,6 @@
 // also allows the caller to do this work on the File thread to conform
 // with i/o restrictions.
 class CHROME_DBUS_EXPORT FileDescriptor {
-  MOVE_ONLY_TYPE_FOR_CPP_03(FileDescriptor);
-
  public:
   // This provides a simple way to pass around file descriptors since they must
   // be closed on a thread that is allowed to perform I/O.
@@ -81,10 +80,12 @@
   int value_;
   bool owner_;
   bool valid_;
+
+  DISALLOW_COPY_AND_ASSIGN(FileDescriptor);
 };
 
 using ScopedFileDescriptor =
-    scoped_ptr<FileDescriptor, FileDescriptor::Deleter>;
+    std::unique_ptr<FileDescriptor, FileDescriptor::Deleter>;
 
 }  // namespace dbus
 
diff --git a/dbus/message.cc b/dbus/message.cc
index 8a58dba..4a84756 100644
--- a/dbus/message.cc
+++ b/dbus/message.cc
@@ -398,23 +398,23 @@
 Response::Response() : Message() {
 }
 
-scoped_ptr<Response> Response::FromRawMessage(DBusMessage* raw_message) {
+std::unique_ptr<Response> Response::FromRawMessage(DBusMessage* raw_message) {
   DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_RETURN,
             dbus_message_get_type(raw_message));
 
-  scoped_ptr<Response> response(new Response);
+  std::unique_ptr<Response> response(new Response);
   response->Init(raw_message);
   return response;
 }
 
-scoped_ptr<Response> Response::FromMethodCall(MethodCall* method_call) {
-  scoped_ptr<Response> response(new Response);
+std::unique_ptr<Response> Response::FromMethodCall(MethodCall* method_call) {
+  std::unique_ptr<Response> response(new Response);
   response->Init(dbus_message_new_method_return(method_call->raw_message()));
   return response;
 }
 
-scoped_ptr<Response> Response::CreateEmpty() {
-  scoped_ptr<Response> response(new Response);
+std::unique_ptr<Response> Response::CreateEmpty() {
+  std::unique_ptr<Response> response(new Response);
   response->Init(dbus_message_new(DBUS_MESSAGE_TYPE_METHOD_RETURN));
   return response;
 }
@@ -426,20 +426,20 @@
 ErrorResponse::ErrorResponse() : Response() {
 }
 
-scoped_ptr<ErrorResponse> ErrorResponse::FromRawMessage(
+std::unique_ptr<ErrorResponse> ErrorResponse::FromRawMessage(
     DBusMessage* raw_message) {
   DCHECK_EQ(DBUS_MESSAGE_TYPE_ERROR, dbus_message_get_type(raw_message));
 
-  scoped_ptr<ErrorResponse> response(new ErrorResponse);
+  std::unique_ptr<ErrorResponse> response(new ErrorResponse);
   response->Init(raw_message);
   return response;
 }
 
-scoped_ptr<ErrorResponse> ErrorResponse::FromMethodCall(
+std::unique_ptr<ErrorResponse> ErrorResponse::FromMethodCall(
     MethodCall* method_call,
     const std::string& error_name,
     const std::string& error_message) {
-  scoped_ptr<ErrorResponse> response(new ErrorResponse);
+  std::unique_ptr<ErrorResponse> response(new ErrorResponse);
   response->Init(dbus_message_new_error(method_call->raw_message(),
                                         error_name.c_str(),
                                         error_message.c_str()));
@@ -599,6 +599,19 @@
   CloseContainer(&array_writer);
 }
 
+void MessageWriter::AppendArrayOfDoubles(const double* values, size_t length) {
+  DCHECK(!container_is_open_);
+  MessageWriter array_writer(message_);
+  OpenArray("d", &array_writer);
+  const bool success = dbus_message_iter_append_fixed_array(
+      &(array_writer.raw_message_iter_),
+      DBUS_TYPE_DOUBLE,
+      &values,
+      static_cast<int>(length));
+  CHECK(success) << "Unable to allocate memory";
+  CloseContainer(&array_writer);
+}
+
 void MessageWriter::AppendArrayOfStrings(
     const std::vector<std::string>& strings) {
   DCHECK(!container_is_open_);
@@ -822,7 +835,26 @@
   dbus_message_iter_get_fixed_array(&array_reader.raw_message_iter_,
                                     bytes,
                                     &int_length);
-  *length = static_cast<int>(int_length);
+  *length = static_cast<size_t>(int_length);
+  return true;
+}
+
+bool MessageReader::PopArrayOfDoubles(const double** doubles, size_t* length) {
+  MessageReader array_reader(message_);
+  if (!PopArray(&array_reader))
+    return false;
+  if (!array_reader.HasMoreData()) {
+    *length = 0;
+    *doubles = nullptr;
+    return true;
+  }
+  if (!array_reader.CheckDataType(DBUS_TYPE_DOUBLE))
+    return false;
+  int int_length = 0;
+  dbus_message_iter_get_fixed_array(&array_reader.raw_message_iter_,
+                                    doubles,
+                                    &int_length);
+  *length = static_cast<size_t>(int_length);
   return true;
 }
 
diff --git a/dbus/message.h b/dbus/message.h
index 7dffe0e..0aa010c 100644
--- a/dbus/message.h
+++ b/dbus/message.h
@@ -8,11 +8,12 @@
 #include <dbus/dbus.h>
 #include <stddef.h>
 #include <stdint.h>
+
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "dbus/dbus_export.h"
 #include "dbus/file_descriptor.h"
 #include "dbus/object_path.h"
@@ -204,16 +205,16 @@
  public:
   // Returns a newly created Response from the given raw message of the
   // type DBUS_MESSAGE_TYPE_METHOD_RETURN. Takes the ownership of |raw_message|.
-  static scoped_ptr<Response> FromRawMessage(DBusMessage* raw_message);
+  static std::unique_ptr<Response> FromRawMessage(DBusMessage* raw_message);
 
   // Returns a newly created Response from the given method call.
   // Used for implementing exported methods. Does NOT take the ownership of
   // |method_call|.
-  static scoped_ptr<Response> FromMethodCall(MethodCall* method_call);
+  static std::unique_ptr<Response> FromMethodCall(MethodCall* method_call);
 
   // Returns a newly created Response with an empty payload.
   // Useful for testing.
-  static scoped_ptr<Response> CreateEmpty();
+  static std::unique_ptr<Response> CreateEmpty();
 
  protected:
   // Creates a Response message. The internal raw message is NULL.
@@ -229,13 +230,14 @@
  public:
   // Returns a newly created Response from the given raw message of the
   // type DBUS_MESSAGE_TYPE_METHOD_RETURN. Takes the ownership of |raw_message|.
-  static scoped_ptr<ErrorResponse> FromRawMessage(DBusMessage* raw_message);
+  static std::unique_ptr<ErrorResponse> FromRawMessage(
+      DBusMessage* raw_message);
 
   // Returns a newly created ErrorResponse from the given method call, the
   // error name, and the error message.  The error name looks like
   // "org.freedesktop.DBus.Error.Failed". Used for returning an error to a
   // failed method call. Does NOT take the ownership of |method_call|.
-  static scoped_ptr<ErrorResponse> FromMethodCall(
+  static std::unique_ptr<ErrorResponse> FromMethodCall(
       MethodCall* method_call,
       const std::string& error_name,
       const std::string& error_message);
@@ -312,6 +314,9 @@
   // function.
   void AppendArrayOfBytes(const uint8_t* values, size_t length);
 
+  // Appends the array of doubles. Used for audio mixer matrix doubles.
+  void AppendArrayOfDoubles(const double* values, size_t length);
+
   // Appends the array of strings. Arrays of strings are often used for
   // exchanging lists of names hence it's worth having a specialized
   // function.
@@ -415,6 +420,9 @@
   // after the MessageReader is destroyed.
   bool PopArrayOfBytes(const uint8_t** bytes, size_t* length);
 
+  // Gets the array of doubles at the current iterator position.
+  bool PopArrayOfDoubles(const double** doubles, size_t* length);
+
   // Gets the array of strings at the current iterator position. |strings| is
   // cleared before being modified. Returns true and advances the iterator on
   // success.
diff --git a/dbus/mock_bus.h b/dbus/mock_bus.h
index 40b090b..b50f230 100644
--- a/dbus/mock_bus.h
+++ b/dbus/mock_bus.h
@@ -26,6 +26,15 @@
                ObjectProxy*(const std::string& service_name,
                             const ObjectPath& object_path,
                             int options));
+  MOCK_METHOD3(RemoveObjectProxy, bool(
+      const std::string& service_name,
+      const ObjectPath& object_path,
+      const base::Closure& callback));
+  MOCK_METHOD4(RemoveObjectProxyWithOptions, bool(
+      const std::string& service_name,
+      const ObjectPath& object_path,
+      int options,
+      const base::Closure& callback));
   MOCK_METHOD1(GetExportedObject, ExportedObject*(
       const ObjectPath& object_path));
   MOCK_METHOD2(GetObjectManager, ObjectManager*(const std::string&,
diff --git a/dbus/mock_object_proxy.h b/dbus/mock_object_proxy.h
index 66f485a..f27f6f6 100644
--- a/dbus/mock_object_proxy.h
+++ b/dbus/mock_object_proxy.h
@@ -21,27 +21,28 @@
                   const std::string& service_name,
                   const ObjectPath& object_path);
 
-  // GMock doesn't support the return type of scoped_ptr<> because scoped_ptr is
-  // uncopyable. This is a workaround which defines |MockCallMethodAndBlock| as
-  // a mock method and makes |CallMethodAndBlock| call the mocked method.
-  // Use |MockCallMethodAndBlock| for setting/testing expectations.
+  // GMock doesn't support the return type of std::unique_ptr<> because
+  // std::unique_ptr is uncopyable. This is a workaround which defines
+  // |MockCallMethodAndBlock| as a mock method and makes
+  // |CallMethodAndBlock| call the mocked method.  Use |MockCallMethodAndBlock|
+  // for setting/testing expectations.
   MOCK_METHOD3(MockCallMethodAndBlockWithErrorDetails,
                Response*(MethodCall* method_call,
                          int timeout_ms,
                          ScopedDBusError* error));
-  scoped_ptr<Response> CallMethodAndBlockWithErrorDetails(
+  std::unique_ptr<Response> CallMethodAndBlockWithErrorDetails(
       MethodCall* method_call,
       int timeout_ms,
       ScopedDBusError* error) override {
-    return scoped_ptr<Response>(
+    return std::unique_ptr<Response>(
         MockCallMethodAndBlockWithErrorDetails(method_call, timeout_ms, error));
   }
   MOCK_METHOD2(MockCallMethodAndBlock, Response*(MethodCall* method_call,
                                                  int timeout_ms));
-  scoped_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
-                                          int timeout_ms) override {
-    return scoped_ptr<Response>(MockCallMethodAndBlock(method_call,
-                                                       timeout_ms));
+  std::unique_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
+                                               int timeout_ms) override {
+    return std::unique_ptr<Response>(
+        MockCallMethodAndBlock(method_call, timeout_ms));
   }
   MOCK_METHOD3(CallMethod, void(MethodCall* method_call,
                                 int timeout_ms,
diff --git a/dbus/object_manager.cc b/dbus/object_manager.cc
index 34b881c..178bb5f 100644
--- a/dbus/object_manager.cc
+++ b/dbus/object_manager.cc
@@ -249,7 +249,7 @@
   return self->HandleMessage(connection, raw_message);
 }
 
-DBusHandlerResult ObjectManager::HandleMessage(DBusConnection* /* connection */,
+DBusHandlerResult ObjectManager::HandleMessage(DBusConnection*,
                                                DBusMessage* raw_message) {
   DCHECK(bus_);
   bus_->AssertOnDBusThread();
@@ -263,8 +263,7 @@
   // raw_message will be unrefed on exit of the function. Increment the
   // reference so we can use it in Signal.
   dbus_message_ref(raw_message);
-  scoped_ptr<Signal> signal(
-      Signal::FromRawMessage(raw_message));
+  std::unique_ptr<Signal> signal(Signal::FromRawMessage(raw_message));
 
   const std::string interface = signal->GetInterface();
   const std::string member = signal->GetMember();
@@ -387,8 +386,8 @@
 }
 
 void ObjectManager::InterfacesAddedConnected(
-    const std::string& /* interface_name */,
-    const std::string& /* signal_name */,
+    const std::string& /*interface_name*/,
+    const std::string& /*signal_name*/,
     bool success) {
   LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
                             << ": Failed to connect to InterfacesAdded signal.";
@@ -412,8 +411,8 @@
 }
 
 void ObjectManager::InterfacesRemovedConnected(
-    const std::string& /* interface_name */,
-    const std::string& /* signal_name */,
+    const std::string& /*interface_name*/,
+    const std::string& /*signal_name*/,
     bool success) {
   LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
                             << ": Failed to connect to "
diff --git a/dbus/object_manager.h b/dbus/object_manager.h
index 25834c5..a97495e 100644
--- a/dbus/object_manager.h
+++ b/dbus/object_manager.h
@@ -166,8 +166,8 @@
     // called on each interface implementation with differing values of
     // |interface_name| as appropriate. An implementation class will only
     // receive multiple calls if it has registered for multiple interfaces.
-    virtual void ObjectAdded(const ObjectPath& /* object_path */,
-                             const std::string& /* interface_name */) { }
+    virtual void ObjectAdded(const ObjectPath& /*object_path*/,
+                             const std::string& /*interface_name*/) {}
 
     // Called by ObjectManager to inform the implementation class than an
     // object with the path |object_path| has been removed. Ths D-Bus interface
@@ -178,8 +178,8 @@
     // This method will be called before the Properties structure and the
     // ObjectProxy object for the given interface are cleaned up, it is safe
     // to retrieve them during removal to vary processing.
-    virtual void ObjectRemoved(const ObjectPath& /* object_path */,
-                               const std::string& /* interface_name */) { }
+    virtual void ObjectRemoved(const ObjectPath& /*object_path*/,
+                               const std::string& /*interface_name*/) {}
   };
 
   // Client code should use Bus::GetObjectManager() instead of this constructor.
diff --git a/dbus/object_proxy.cc b/dbus/object_proxy.cc
index 9fb3ee4..ce02551 100644
--- a/dbus/object_proxy.cc
+++ b/dbus/object_proxy.cc
@@ -69,14 +69,16 @@
 // Originally we tried to make |method_call| a const reference, but we
 // gave up as dbus_connection_send_with_reply_and_block() takes a
 // non-const pointer of DBusMessage as the second parameter.
-scoped_ptr<Response> ObjectProxy::CallMethodAndBlockWithErrorDetails(
-    MethodCall* method_call, int timeout_ms, ScopedDBusError* error) {
+std::unique_ptr<Response> ObjectProxy::CallMethodAndBlockWithErrorDetails(
+    MethodCall* method_call,
+    int timeout_ms,
+    ScopedDBusError* error) {
   bus_->AssertOnDBusThread();
 
   if (!bus_->Connect() ||
       !method_call->SetDestination(service_name_) ||
       !method_call->SetPath(object_path_))
-    return scoped_ptr<Response>();
+    return std::unique_ptr<Response>();
 
   DBusMessage* request_message = method_call->raw_message();
 
@@ -97,7 +99,7 @@
                          method_call->GetMember(),
                          error->is_set() ? error->name() : "unknown error type",
                          error->is_set() ? error->message() : "");
-    return scoped_ptr<Response>();
+    return std::unique_ptr<Response>();
   }
   // Record time spent for the method call. Don't include failures.
   UMA_HISTOGRAM_TIMES("DBus.SyncMethodCallTime",
@@ -106,8 +108,9 @@
   return Response::FromRawMessage(response_message);
 }
 
-scoped_ptr<Response> ObjectProxy::CallMethodAndBlock(MethodCall* method_call,
-                                                     int timeout_ms) {
+std::unique_ptr<Response> ObjectProxy::CallMethodAndBlock(
+    MethodCall* method_call,
+    int timeout_ms) {
   ScopedDBusError error;
   return CallMethodAndBlockWithErrorDetails(method_call, timeout_ms, &error);
 }
@@ -325,7 +328,7 @@
   } else if (dbus_message_get_type(response_message) ==
              DBUS_MESSAGE_TYPE_ERROR) {
     // This will take |response_message| and release (unref) it.
-    scoped_ptr<ErrorResponse> error_response(
+    std::unique_ptr<ErrorResponse> error_response(
         ErrorResponse::FromRawMessage(response_message));
     error_callback.Run(error_response.get());
     // Delete the message  on the D-Bus thread. See below for why.
@@ -335,7 +338,8 @@
                    error_response.release()));
   } else {
     // This will take |response_message| and release (unref) it.
-    scoped_ptr<Response> response(Response::FromRawMessage(response_message));
+    std::unique_ptr<Response> response(
+        Response::FromRawMessage(response_message));
     // The response is successfully received.
     response_callback.Run(response.get());
     // The message should be deleted on the D-Bus thread for a complicated
@@ -455,9 +459,8 @@
   }
 }
 
-DBusHandlerResult ObjectProxy::HandleMessage(
-    DBusConnection* /* connection */,
-    DBusMessage* raw_message) {
+DBusHandlerResult ObjectProxy::HandleMessage(DBusConnection*,
+                                             DBusMessage* raw_message) {
   bus_->AssertOnDBusThread();
 
   if (dbus_message_get_type(raw_message) != DBUS_MESSAGE_TYPE_SIGNAL)
@@ -466,8 +469,7 @@
   // raw_message will be unrefed on exit of the function. Increment the
   // reference so we can use it in Signal.
   dbus_message_ref(raw_message);
-  scoped_ptr<Signal> signal(
-      Signal::FromRawMessage(raw_message));
+  std::unique_ptr<Signal> signal(Signal::FromRawMessage(raw_message));
 
   // Verify the signal comes from the object we're proxying for, this is
   // our last chance to return DBUS_HANDLER_RESULT_NOT_YET_HANDLED and
@@ -565,17 +567,19 @@
   if (ignore_service_unknown_errors_ &&
       (error_name == kErrorServiceUnknown || error_name == kErrorObjectUnknown))
     return;
-  logging::LogSeverity severity = logging::LOG_ERROR;
-  // "UnknownObject" indicates that an object or service is no longer available,
-  // e.g. a Shill network service has gone out of range. Treat these as warnings
-  // not errors.
-  if (error_name == kErrorObjectUnknown)
-    severity = logging::LOG_WARNING;
+
   std::ostringstream msg;
   msg << "Failed to call method: " << interface_name << "." << method_name
       << ": object_path= " << object_path_.value()
       << ": " << error_name << ": " << error_message;
-  logging::LogAtLevel(severity, msg.str());
+
+  // "UnknownObject" indicates that an object or service is no longer available,
+  // e.g. a Shill network service has gone out of range. Treat these as warnings
+  // not errors.
+  if (error_name == kErrorObjectUnknown)
+    LOG(WARNING) << msg.str();
+  else
+    LOG(ERROR) << msg.str();
 }
 
 void ObjectProxy::OnCallMethodError(const std::string& interface_name,
@@ -657,7 +661,7 @@
 }
 
 DBusHandlerResult ObjectProxy::HandleNameOwnerChanged(
-    scoped_ptr<Signal> signal) {
+    std::unique_ptr<Signal> signal) {
   DCHECK(signal);
   bus_->AssertOnDBusThread();
 
diff --git a/dbus/object_proxy.h b/dbus/object_proxy.h
index edb97a5..033e886 100644
--- a/dbus/object_proxy.h
+++ b/dbus/object_proxy.h
@@ -8,6 +8,7 @@
 #include <dbus/dbus.h>
 
 #include <map>
+#include <memory>
 #include <set>
 #include <string>
 #include <vector>
@@ -97,7 +98,7 @@
   // in the |error| object.
   //
   // BLOCKING CALL.
-  virtual scoped_ptr<Response> CallMethodAndBlockWithErrorDetails(
+  virtual std::unique_ptr<Response> CallMethodAndBlockWithErrorDetails(
       MethodCall* method_call,
       int timeout_ms,
       ScopedDBusError* error);
@@ -106,8 +107,8 @@
   // is returned. Returns NULL on error.
   //
   // BLOCKING CALL.
-  virtual scoped_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
-                                                  int timeout_ms);
+  virtual std::unique_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
+                                                       int timeout_ms);
 
   // Requests to call the method of the remote object.
   //
@@ -289,7 +290,8 @@
   void UpdateNameOwnerAndBlock();
 
   // Handles NameOwnerChanged signal from D-Bus's special message bus.
-  DBusHandlerResult HandleNameOwnerChanged(scoped_ptr<dbus::Signal> signal);
+  DBusHandlerResult HandleNameOwnerChanged(
+      std::unique_ptr<dbus::Signal> signal);
 
   // Runs |name_owner_changed_callback_|.
   void RunNameOwnerChangedCallback(const std::string& old_owner,
diff --git a/dbus/property.cc b/dbus/property.cc
index 156d0c7..aa58436 100644
--- a/dbus/property.cc
+++ b/dbus/property.cc
@@ -89,7 +89,7 @@
   }
 }
 
-void PropertySet::ChangedConnected(const std::string& /* interface_name */,
+void PropertySet::ChangedConnected(const std::string& /*interface_name*/,
                                    const std::string& signal_name,
                                    bool success) {
   LOG_IF(WARNING, !success) << "Failed to connect to " << signal_name
@@ -141,9 +141,8 @@
   writer.AppendString(property->name());
 
   DCHECK(object_proxy_);
-  scoped_ptr<dbus::Response> response(
-      object_proxy_->CallMethodAndBlock(&method_call,
-                                        ObjectProxy::TIMEOUT_USE_DEFAULT));
+  std::unique_ptr<dbus::Response> response(object_proxy_->CallMethodAndBlock(
+      &method_call, ObjectProxy::TIMEOUT_USE_DEFAULT));
 
   if (!response.get()) {
     LOG(WARNING) << property->name() << ": GetAndBlock: failed.";
@@ -212,9 +211,8 @@
   property->AppendSetValueToWriter(&writer);
 
   DCHECK(object_proxy_);
-  scoped_ptr<dbus::Response> response(
-      object_proxy_->CallMethodAndBlock(&method_call,
-                                        ObjectProxy::TIMEOUT_USE_DEFAULT));
+  std::unique_ptr<dbus::Response> response(object_proxy_->CallMethodAndBlock(
+      &method_call, ObjectProxy::TIMEOUT_USE_DEFAULT));
   if (response.get())
     return true;
   return false;
diff --git a/dbus/values_util.cc b/dbus/values_util.cc
index e932312..bea7bea 100644
--- a/dbus/values_util.cc
+++ b/dbus/values_util.cc
@@ -4,9 +4,11 @@
 
 #include "dbus/values_util.h"
 
+#include <utility>
+
 #include "base/json/json_writer.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/values.h"
 #include "dbus/message.h"
 
@@ -23,10 +25,10 @@
 // Pops values from |reader| and appends them to |list_value|.
 bool PopListElements(MessageReader* reader, base::ListValue* list_value) {
   while (reader->HasMoreData()) {
-    base::Value* element_value = PopDataAsValue(reader);
+    std::unique_ptr<base::Value> element_value = PopDataAsValue(reader);
     if (!element_value)
       return false;
-    list_value->Append(element_value);
+    list_value->Append(std::move(element_value));
   }
   return true;
 }
@@ -47,17 +49,17 @@
         return false;
     } else {
       // If the type of keys is not STRING, convert it to string.
-      scoped_ptr<base::Value> key(PopDataAsValue(&entry_reader));
+      std::unique_ptr<base::Value> key(PopDataAsValue(&entry_reader));
       if (!key)
         return false;
       // Use JSONWriter to convert an arbitrary value to a string.
       base::JSONWriter::Write(*key, &key_string);
     }
     // Get the value and set the key-value pair.
-    base::Value* value = PopDataAsValue(&entry_reader);
+    std::unique_ptr<base::Value> value = PopDataAsValue(&entry_reader);
     if (!value)
       return false;
-    dictionary_value->SetWithoutPathExpansion(key_string, value);
+    dictionary_value->SetWithoutPathExpansion(key_string, std::move(value));
   }
   return true;
 }
@@ -87,8 +89,8 @@
 
 }  // namespace
 
-base::Value* PopDataAsValue(MessageReader* reader) {
-  base::Value* result = NULL;
+std::unique_ptr<base::Value> PopDataAsValue(MessageReader* reader) {
+  std::unique_ptr<base::Value> result;
   switch (reader->GetDataType()) {
     case Message::INVALID_DATA:
       // Do nothing.
@@ -96,37 +98,39 @@
     case Message::BYTE: {
       uint8_t value = 0;
       if (reader->PopByte(&value))
-        result = new base::FundamentalValue(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::BOOL: {
       bool value = false;
       if (reader->PopBool(&value))
-        result = new base::FundamentalValue(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::INT16: {
       int16_t value = 0;
       if (reader->PopInt16(&value))
-        result = new base::FundamentalValue(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::UINT16: {
       uint16_t value = 0;
       if (reader->PopUint16(&value))
-        result = new base::FundamentalValue(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::INT32: {
       int32_t value = 0;
       if (reader->PopInt32(&value))
-        result = new base::FundamentalValue(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::UINT32: {
       uint32_t value = 0;
-      if (reader->PopUint32(&value))
-        result = new base::FundamentalValue(static_cast<double>(value));
+      if (reader->PopUint32(&value)) {
+        result = base::MakeUnique<base::FundamentalValue>(
+            static_cast<double>(value));
+      }
       break;
     }
     case Message::INT64: {
@@ -134,7 +138,8 @@
       if (reader->PopInt64(&value)) {
         DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
             value << " is not exactly representable by double";
-        result = new base::FundamentalValue(static_cast<double>(value));
+        result = base::MakeUnique<base::FundamentalValue>(
+            static_cast<double>(value));
       }
       break;
     }
@@ -143,26 +148,27 @@
       if (reader->PopUint64(&value)) {
         DLOG_IF(WARNING, !IsExactlyRepresentableByDouble(value)) <<
             value << " is not exactly representable by double";
-        result = new base::FundamentalValue(static_cast<double>(value));
+        result = base::MakeUnique<base::FundamentalValue>(
+            static_cast<double>(value));
       }
       break;
     }
     case Message::DOUBLE: {
       double value = 0;
       if (reader->PopDouble(&value))
-        result = new base::FundamentalValue(value);
+        result = base::MakeUnique<base::FundamentalValue>(value);
       break;
     }
     case Message::STRING: {
       std::string value;
       if (reader->PopString(&value))
-        result = new base::StringValue(value);
+        result = base::MakeUnique<base::StringValue>(value);
       break;
     }
     case Message::OBJECT_PATH: {
       ObjectPath value;
       if (reader->PopObjectPath(&value))
-        result = new base::StringValue(value.value());
+        result = base::MakeUnique<base::StringValue>(value.value());
       break;
     }
     case Message::UNIX_FD: {
@@ -176,14 +182,14 @@
         // If the type of the array's element is DICT_ENTRY, create a
         // DictionaryValue, otherwise create a ListValue.
         if (sub_reader.GetDataType() == Message::DICT_ENTRY) {
-          scoped_ptr<base::DictionaryValue> dictionary_value(
+          std::unique_ptr<base::DictionaryValue> dictionary_value(
               new base::DictionaryValue);
           if (PopDictionaryEntries(&sub_reader, dictionary_value.get()))
-            result = dictionary_value.release();
+            result = std::move(dictionary_value);
         } else {
-          scoped_ptr<base::ListValue> list_value(new base::ListValue);
+          std::unique_ptr<base::ListValue> list_value(new base::ListValue);
           if (PopListElements(&sub_reader, list_value.get()))
-            result = list_value.release();
+            result = std::move(list_value);
         }
       }
       break;
@@ -191,9 +197,9 @@
     case Message::STRUCT: {
       MessageReader sub_reader(NULL);
       if (reader->PopStruct(&sub_reader)) {
-        scoped_ptr<base::ListValue> list_value(new base::ListValue);
+        std::unique_ptr<base::ListValue> list_value(new base::ListValue);
         if (PopListElements(&sub_reader, list_value.get()))
-          result = list_value.release();
+          result = std::move(list_value);
       }
       break;
     }
@@ -278,9 +284,7 @@
       value.GetAsList(&list);
       dbus::MessageWriter array_writer(NULL);
       writer->OpenArray("v", &array_writer);
-      for (base::ListValue::const_iterator iter = list->begin();
-           iter != list->end(); ++iter) {
-        const base::Value* value = *iter;
+      for (const auto& value : *list) {
         AppendValueDataAsVariant(&array_writer, *value);
       }
       writer->CloseContainer(&array_writer);
diff --git a/dbus/values_util.h b/dbus/values_util.h
index b6f4ff3..81b839b 100644
--- a/dbus/values_util.h
+++ b/dbus/values_util.h
@@ -7,6 +7,8 @@
 
 #include <stdint.h>
 
+#include <memory>
+
 #include "dbus/dbus_export.h"
 
 namespace base {
@@ -22,7 +24,8 @@
 // Returns NULL if an error occurs.
 // Note: Integer values larger than int32_t (including uint32_t) are converted
 // to double.  Non-string dictionary keys are converted to strings.
-CHROME_DBUS_EXPORT base::Value* PopDataAsValue(MessageReader* reader);
+CHROME_DBUS_EXPORT std::unique_ptr<base::Value> PopDataAsValue(
+    MessageReader* reader);
 
 // Appends a basic type value to |writer|. Basic types are BOOLEAN, INTEGER,
 // DOUBLE, and STRING. Use this function for values that are known to be basic
diff --git a/sandbox/BUILD.gn b/sandbox/BUILD.gn
index 6825a1d..8ca3574 100644
--- a/sandbox/BUILD.gn
+++ b/sandbox/BUILD.gn
@@ -9,12 +9,10 @@
       "//sandbox/win:sandbox",
     ]
   } else if (is_mac) {
-    # TODO(GYP): Make sandbox compile w/ 10.6 SDK.
-    if (false) {
-      public_deps = [
-        "//sandbox/mac:sandbox",
-      ]
-    }
+    public_deps = [
+      "//sandbox/mac:sandbox",
+      "//sandbox/mac:seatbelt",
+    ]
   } else if (is_linux || is_android) {
     public_deps = [
       "//sandbox/linux:sandbox",
diff --git a/sandbox/linux/BUILD.gn b/sandbox/linux/BUILD.gn
index 341d363..a5c041f 100644
--- a/sandbox/linux/BUILD.gn
+++ b/sandbox/linux/BUILD.gn
@@ -192,24 +192,15 @@
       rebase_path(outputs, root_build_dir) + rebase_path(inputs, root_build_dir)
 }
 
-# TODO(GYP): Delete this after we've converted everything to GN.
-# The _run targets exist only for compatibility w/ GYP.
-group("sandbox_linux_unittests_run") {
-  testonly = true
-  deps = [
-    ":sandbox_linux_unittests",
-  ]
-}
 
-# The main sandboxing test target. "sandbox_linux_unittests" cannot use the
-# test() template because the test is run as an executable not as an APK on
-# Android.
-executable("sandbox_linux_unittests") {
-  testonly = true
+test("sandbox_linux_unittests") {
   deps = [
     ":sandbox_linux_unittests_sources",
     "//build/config/sanitizers:deps",
   ]
+  if (is_android) {
+    use_raw_android_executable = true
+  }
 }
 
 component("seccomp_bpf") {
@@ -242,9 +233,11 @@
   ]
   defines = [ "SANDBOX_IMPLEMENTATION" ]
 
+  public_deps = [
+    ":sandbox_services_headers",
+  ]
   deps = [
     ":sandbox_services",
-    ":sandbox_services_headers",
     "//base",
   ]
 
@@ -312,9 +305,24 @@
       "-Wno-sign-compare",
     ]
 
-    deps = [
-      "//build/config/sanitizers:deps",
-    ]
+    import("//build/config/compiler/compiler.gni")
+    import("//build/config/sanitizers/sanitizers.gni")
+    if (is_component_build || using_sanitizer) {
+      # WARNING! We remove this config so that we don't accidentally
+      # pick up the //build/config:rpath_for_built_shared_libraries
+      # sub-config. However, this means that we need to duplicate any
+      # other flags that executable_config might have.
+      configs -= [ "//build/config:executable_config" ]
+      if (!use_gold) {
+        ldflags = [ "-Wl,--disable-new-dtags" ]
+      }
+    }
+
+    # We also do not want to pick up any of the other sanitizer
+    # flags (i.e. we do not want to build w/ the sanitizers at all).
+    # This is safe to delete unconditionally, because it is part of the
+    # default configs and empty when not using the sanitizers.
+    configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
   }
 }
 
@@ -351,6 +359,7 @@
 
   defines = [ "SANDBOX_IMPLEMENTATION" ]
 
+  public_deps = []
   deps = [
     "//base",
   ]
@@ -365,7 +374,7 @@
       "services/namespace_utils.h",
     ]
 
-    deps += [ ":sandbox_services_headers" ]
+    public_deps += [ ":sandbox_services_headers" ]
   }
 
   if (is_nacl_nonsfi) {
@@ -442,23 +451,11 @@
 }
 
 if (is_android) {
-  create_native_executable_dist("sandbox_linux_unittests_deps") {
+  # TODO(GYP_GONE) Delete this after we've converted everything to GN.
+  group("sandbox_linux_unittests_deps") {
     testonly = true
-    dist_dir = "$root_out_dir/sandbox_linux_unittests_deps"
-    binary = "$root_out_dir/sandbox_linux_unittests"
     deps = [
       ":sandbox_linux_unittests",
     ]
-
-    if (is_component_build) {
-      deps += [ "//build/android:cpplib_stripped" ]
-    }
-  }
-
-  test_runner_script("sandbox_linux_unittests__test_runner_script") {
-    test_name = "sandbox_linux_unittests"
-    test_type = "gtest"
-    test_suite = "sandbox_linux_unittests"
-    isolate_file = "//sandbox/sandbox_linux_unittests_android.isolate"
   }
 }
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl.cc b/sandbox/linux/bpf_dsl/bpf_dsl.cc
index 3330c47..fed6368 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl.cc
+++ b/sandbox/linux/bpf_dsl/bpf_dsl.cc
@@ -11,7 +11,6 @@
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
 #include "sandbox/linux/bpf_dsl/errorcode.h"
 #include "sandbox/linux/bpf_dsl/policy_compiler.h"
@@ -24,6 +23,7 @@
 class ReturnResultExprImpl : public internal::ResultExprImpl {
  public:
   explicit ReturnResultExprImpl(uint32_t ret) : ret_(ret) {}
+  ~ReturnResultExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc) const override {
     return pc->Return(ret_);
@@ -36,8 +36,6 @@
   }
 
  private:
-  ~ReturnResultExprImpl() override {}
-
   bool IsAction(uint32_t action) const {
     return (ret_ & SECCOMP_RET_ACTION) == action;
   }
@@ -53,6 +51,7 @@
       : func_(func), arg_(arg), safe_(safe) {
     DCHECK(func_);
   }
+  ~TrapResultExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc) const override {
     return pc->Trap(func_, arg_, safe_);
@@ -63,8 +62,6 @@
   bool IsDeny() const override { return true; }
 
  private:
-  ~TrapResultExprImpl() override {}
-
   TrapRegistry::TrapFnc func_;
   const void* arg_;
   bool safe_;
@@ -74,10 +71,13 @@
 
 class IfThenResultExprImpl : public internal::ResultExprImpl {
  public:
-  IfThenResultExprImpl(const BoolExpr& cond,
-                       const ResultExpr& then_result,
-                       const ResultExpr& else_result)
-      : cond_(cond), then_result_(then_result), else_result_(else_result) {}
+  IfThenResultExprImpl(BoolExpr cond,
+                       ResultExpr then_result,
+                       ResultExpr else_result)
+      : cond_(std::move(cond)),
+        then_result_(std::move(then_result)),
+        else_result_(std::move(else_result)) {}
+  ~IfThenResultExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc) const override {
     // We compile the "then" and "else" expressions in separate statements so
@@ -92,8 +92,6 @@
   }
 
  private:
-  ~IfThenResultExprImpl() override {}
-
   BoolExpr cond_;
   ResultExpr then_result_;
   ResultExpr else_result_;
@@ -104,6 +102,7 @@
 class ConstBoolExprImpl : public internal::BoolExprImpl {
  public:
   ConstBoolExprImpl(bool value) : value_(value) {}
+  ~ConstBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -112,8 +111,6 @@
   }
 
  private:
-  ~ConstBoolExprImpl() override {}
-
   bool value_;
 
   DISALLOW_COPY_AND_ASSIGN(ConstBoolExprImpl);
@@ -126,6 +123,7 @@
                           uint64_t mask,
                           uint64_t value)
       : argno_(argno), width_(width), mask_(mask), value_(value) {}
+  ~MaskedEqualBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -134,8 +132,6 @@
   }
 
  private:
-  ~MaskedEqualBoolExprImpl() override {}
-
   int argno_;
   size_t width_;
   uint64_t mask_;
@@ -146,7 +142,8 @@
 
 class NegateBoolExprImpl : public internal::BoolExprImpl {
  public:
-  explicit NegateBoolExprImpl(const BoolExpr& cond) : cond_(cond) {}
+  explicit NegateBoolExprImpl(BoolExpr cond) : cond_(std::move(cond)) {}
+  ~NegateBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -155,8 +152,6 @@
   }
 
  private:
-  ~NegateBoolExprImpl() override {}
-
   BoolExpr cond_;
 
   DISALLOW_COPY_AND_ASSIGN(NegateBoolExprImpl);
@@ -164,8 +159,9 @@
 
 class AndBoolExprImpl : public internal::BoolExprImpl {
  public:
-  AndBoolExprImpl(const BoolExpr& lhs, const BoolExpr& rhs)
-      : lhs_(lhs), rhs_(rhs) {}
+  AndBoolExprImpl(BoolExpr lhs, BoolExpr rhs)
+      : lhs_(std::move(lhs)), rhs_(std::move(rhs)) {}
+  ~AndBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -175,8 +171,6 @@
   }
 
  private:
-  ~AndBoolExprImpl() override {}
-
   BoolExpr lhs_;
   BoolExpr rhs_;
 
@@ -185,8 +179,9 @@
 
 class OrBoolExprImpl : public internal::BoolExprImpl {
  public:
-  OrBoolExprImpl(const BoolExpr& lhs, const BoolExpr& rhs)
-      : lhs_(lhs), rhs_(rhs) {}
+  OrBoolExprImpl(BoolExpr lhs, BoolExpr rhs)
+      : lhs_(std::move(lhs)), rhs_(std::move(rhs)) {}
+  ~OrBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -196,8 +191,6 @@
   }
 
  private:
-  ~OrBoolExprImpl() override {}
-
   BoolExpr lhs_;
   BoolExpr rhs_;
 
@@ -237,64 +230,63 @@
   // accordingly.
   CHECK(size == 4 || size == 8);
 
-  return BoolExpr(new const MaskedEqualBoolExprImpl(num, size, mask, val));
+  return std::make_shared<MaskedEqualBoolExprImpl>(num, size, mask, val);
 }
 
 }  // namespace internal
 
 ResultExpr Allow() {
-  return ResultExpr(new const ReturnResultExprImpl(SECCOMP_RET_ALLOW));
+  return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_ALLOW);
 }
 
 ResultExpr Error(int err) {
   CHECK(err >= ErrorCode::ERR_MIN_ERRNO && err <= ErrorCode::ERR_MAX_ERRNO);
-  return ResultExpr(new const ReturnResultExprImpl(SECCOMP_RET_ERRNO + err));
+  return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_ERRNO + err);
 }
 
 ResultExpr Kill() {
-  return ResultExpr(new const ReturnResultExprImpl(SECCOMP_RET_KILL));
+  return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_KILL);
 }
 
 ResultExpr Trace(uint16_t aux) {
-  return ResultExpr(new const ReturnResultExprImpl(SECCOMP_RET_TRACE + aux));
+  return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_TRACE + aux);
 }
 
 ResultExpr Trap(TrapRegistry::TrapFnc trap_func, const void* aux) {
-  return ResultExpr(
-      new const TrapResultExprImpl(trap_func, aux, true /* safe */));
+  return std::make_shared<TrapResultExprImpl>(trap_func, aux, true /* safe */);
 }
 
 ResultExpr UnsafeTrap(TrapRegistry::TrapFnc trap_func, const void* aux) {
-  return ResultExpr(
-      new const TrapResultExprImpl(trap_func, aux, false /* unsafe */));
+  return std::make_shared<TrapResultExprImpl>(trap_func, aux,
+                                              false /* unsafe */);
 }
 
 BoolExpr BoolConst(bool value) {
-  return BoolExpr(new const ConstBoolExprImpl(value));
+  return std::make_shared<ConstBoolExprImpl>(value);
 }
 
-BoolExpr Not(const BoolExpr& cond) {
-  return BoolExpr(new const NegateBoolExprImpl(cond));
+BoolExpr Not(BoolExpr cond) {
+  return std::make_shared<NegateBoolExprImpl>(std::move(cond));
 }
 
 BoolExpr AllOf() {
   return BoolConst(true);
 }
 
-BoolExpr AllOf(const BoolExpr& lhs, const BoolExpr& rhs) {
-  return BoolExpr(new const AndBoolExprImpl(lhs, rhs));
+BoolExpr AllOf(BoolExpr lhs, BoolExpr rhs) {
+  return std::make_shared<AndBoolExprImpl>(std::move(lhs), std::move(rhs));
 }
 
 BoolExpr AnyOf() {
   return BoolConst(false);
 }
 
-BoolExpr AnyOf(const BoolExpr& lhs, const BoolExpr& rhs) {
-  return BoolExpr(new const OrBoolExprImpl(lhs, rhs));
+BoolExpr AnyOf(BoolExpr lhs, BoolExpr rhs) {
+  return std::make_shared<OrBoolExprImpl>(std::move(lhs), std::move(rhs));
 }
 
-Elser If(const BoolExpr& cond, const ResultExpr& then_result) {
-  return Elser(nullptr).ElseIf(cond, then_result);
+Elser If(BoolExpr cond, ResultExpr then_result) {
+  return Elser(nullptr).ElseIf(std::move(cond), std::move(then_result));
 }
 
 Elser::Elser(cons::List<Clause> clause_list) : clause_list_(clause_list) {
@@ -306,11 +298,12 @@
 Elser::~Elser() {
 }
 
-Elser Elser::ElseIf(const BoolExpr& cond, const ResultExpr& then_result) const {
-  return Elser(Cons(std::make_pair(cond, then_result), clause_list_));
+Elser Elser::ElseIf(BoolExpr cond, ResultExpr then_result) const {
+  return Elser(Cons(std::make_pair(std::move(cond), std::move(then_result)),
+                    clause_list_));
 }
 
-ResultExpr Elser::Else(const ResultExpr& else_result) const {
+ResultExpr Elser::Else(ResultExpr else_result) const {
   // We finally have the default result expression for this
   // if/then/else sequence.  Also, we've already accumulated all
   // if/then pairs into a list of reverse order (i.e., lower priority
@@ -333,10 +326,10 @@
   //
   // and end up with an appropriately chained tree.
 
-  ResultExpr expr = else_result;
+  ResultExpr expr = std::move(else_result);
   for (const Clause& clause : clause_list_) {
-    expr = ResultExpr(
-        new const IfThenResultExprImpl(clause.first, clause.second, expr));
+    expr = std::make_shared<IfThenResultExprImpl>(clause.first, clause.second,
+                                                  std::move(expr));
   }
   return expr;
 }
@@ -344,5 +337,7 @@
 }  // namespace bpf_dsl
 }  // namespace sandbox
 
-template class scoped_refptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
-template class scoped_refptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+namespace std {
+template class shared_ptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
+template class shared_ptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+}  // namespace std
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl.h b/sandbox/linux/bpf_dsl/bpf_dsl.h
index ffd20ff..7f81344 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl.h
@@ -8,11 +8,11 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <utility>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
 #include "sandbox/linux/bpf_dsl/cons.h"
 #include "sandbox/linux/bpf_dsl/trap_registry.h"
@@ -77,10 +77,10 @@
 namespace bpf_dsl {
 
 // ResultExpr is an opaque reference to an immutable result expression tree.
-typedef scoped_refptr<const internal::ResultExprImpl> ResultExpr;
+using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
 
 // BoolExpr is an opaque reference to an immutable boolean expression tree.
-typedef scoped_refptr<const internal::BoolExprImpl> BoolExpr;
+using BoolExpr = std::shared_ptr<const internal::BoolExprImpl>;
 
 // Allow specifies a result that the system call should be allowed to
 // execute normally.
@@ -121,21 +121,21 @@
 SANDBOX_EXPORT BoolExpr BoolConst(bool value);
 
 // Not returns a BoolExpr representing the logical negation of |cond|.
-SANDBOX_EXPORT BoolExpr Not(const BoolExpr& cond);
+SANDBOX_EXPORT BoolExpr Not(BoolExpr cond);
 
 // AllOf returns a BoolExpr representing the logical conjunction ("and")
 // of zero or more BoolExprs.
 SANDBOX_EXPORT BoolExpr AllOf();
-SANDBOX_EXPORT BoolExpr AllOf(const BoolExpr& lhs, const BoolExpr& rhs);
+SANDBOX_EXPORT BoolExpr AllOf(BoolExpr lhs, BoolExpr rhs);
 template <typename... Rest>
-SANDBOX_EXPORT BoolExpr AllOf(const BoolExpr& first, const Rest&... rest);
+SANDBOX_EXPORT BoolExpr AllOf(BoolExpr first, Rest&&... rest);
 
 // AnyOf returns a BoolExpr representing the logical disjunction ("or")
 // of zero or more BoolExprs.
 SANDBOX_EXPORT BoolExpr AnyOf();
-SANDBOX_EXPORT BoolExpr AnyOf(const BoolExpr& lhs, const BoolExpr& rhs);
+SANDBOX_EXPORT BoolExpr AnyOf(BoolExpr lhs, BoolExpr rhs);
 template <typename... Rest>
-SANDBOX_EXPORT BoolExpr AnyOf(const BoolExpr& first, const Rest&... rest);
+SANDBOX_EXPORT BoolExpr AnyOf(BoolExpr first, Rest&&... rest);
 
 template <typename T>
 class SANDBOX_EXPORT Arg {
@@ -173,7 +173,7 @@
 
 // If begins a conditional result expression predicated on the
 // specified boolean expression.
-SANDBOX_EXPORT Elser If(const BoolExpr& cond, const ResultExpr& then_result);
+SANDBOX_EXPORT Elser If(BoolExpr cond, ResultExpr then_result);
 
 class SANDBOX_EXPORT Elser {
  public:
@@ -182,20 +182,20 @@
 
   // ElseIf extends the conditional result expression with another
   // "if then" clause, predicated on the specified boolean expression.
-  Elser ElseIf(const BoolExpr& cond, const ResultExpr& then_result) const;
+  Elser ElseIf(BoolExpr cond, ResultExpr then_result) const;
 
   // Else terminates a conditional result expression using |else_result| as
   // the default fallback result expression.
-  ResultExpr Else(const ResultExpr& else_result) const;
+  ResultExpr Else(ResultExpr else_result) const;
 
  private:
-  typedef std::pair<BoolExpr, ResultExpr> Clause;
+  using Clause = std::pair<BoolExpr, ResultExpr>;
 
   explicit Elser(cons::List<Clause> clause_list);
 
   cons::List<Clause> clause_list_;
 
-  friend Elser If(const BoolExpr&, const ResultExpr&);
+  friend Elser If(BoolExpr, ResultExpr);
   template <typename T>
   friend Caser<T> Switch(const Arg<T>&);
   DISALLOW_ASSIGN(Elser);
@@ -213,16 +213,16 @@
   ~Caser() {}
 
   // Case adds a single-value "case" clause to the switch.
-  Caser<T> Case(T value, const ResultExpr& result) const;
+  Caser<T> Case(T value, ResultExpr result) const;
 
   // Cases adds a multiple-value "case" clause to the switch.
   // See also the SANDBOX_BPF_DSL_CASES macro below for a more idiomatic way
   // of using this function.
   template <typename... Values>
-  Caser<T> CasesImpl(const ResultExpr& result, const Values&... values) const;
+  Caser<T> CasesImpl(ResultExpr result, const Values&... values) const;
 
   // Terminate the switch with a "default" clause.
-  ResultExpr Default(const ResultExpr& result) const;
+  ResultExpr Default(ResultExpr result) const;
 
  private:
   Caser(const Arg<T>& arg, Elser elser) : arg_(arg), elser_(elser) {}
@@ -299,34 +299,34 @@
 }
 
 template <typename T>
-Caser<T> Caser<T>::Case(T value, const ResultExpr& result) const {
-  return SANDBOX_BPF_DSL_CASES((value), result);
+Caser<T> Caser<T>::Case(T value, ResultExpr result) const {
+  return SANDBOX_BPF_DSL_CASES((value), std::move(result));
 }
 
 template <typename T>
 template <typename... Values>
-Caser<T> Caser<T>::CasesImpl(const ResultExpr& result,
-                             const Values&... values) const {
+Caser<T> Caser<T>::CasesImpl(ResultExpr result, const Values&... values) const {
   // Theoretically we could evaluate arg_ just once and emit a more efficient
   // dispatch table, but for now we simply translate into an equivalent
   // If/ElseIf/Else chain.
 
-  return Caser<T>(arg_, elser_.ElseIf(AnyOf((arg_ == values)...), result));
+  return Caser<T>(arg_,
+                  elser_.ElseIf(AnyOf((arg_ == values)...), std::move(result)));
 }
 
 template <typename T>
-ResultExpr Caser<T>::Default(const ResultExpr& result) const {
-  return elser_.Else(result);
+ResultExpr Caser<T>::Default(ResultExpr result) const {
+  return elser_.Else(std::move(result));
 }
 
 template <typename... Rest>
-BoolExpr AllOf(const BoolExpr& first, const Rest&... rest) {
-  return AllOf(first, AllOf(rest...));
+BoolExpr AllOf(BoolExpr first, Rest&&... rest) {
+  return AllOf(std::move(first), AllOf(std::forward<Rest>(rest)...));
 }
 
 template <typename... Rest>
-BoolExpr AnyOf(const BoolExpr& first, const Rest&... rest) {
-  return AnyOf(first, AnyOf(rest...));
+BoolExpr AnyOf(BoolExpr first, Rest&&... rest) {
+  return AnyOf(std::move(first), AnyOf(std::forward<Rest>(rest)...));
 }
 
 }  // namespace bpf_dsl
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl_forward.h b/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
index 1830389..10477c9 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
@@ -5,7 +5,8 @@
 #ifndef SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
 #define SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
 
-#include "base/memory/ref_counted.h"
+#include <memory>
+
 #include "sandbox/sandbox_export.h"
 
 namespace sandbox {
@@ -20,8 +21,8 @@
 class BoolExprImpl;
 }
 
-typedef scoped_refptr<const internal::ResultExprImpl> ResultExpr;
-typedef scoped_refptr<const internal::BoolExprImpl> BoolExpr;
+using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
+using BoolExpr = std::shared_ptr<const internal::BoolExprImpl>;
 
 template <typename T>
 class Arg;
@@ -34,9 +35,11 @@
 }  // namespace bpf_dsl
 }  // namespace sandbox
 
+namespace std {
 extern template class SANDBOX_EXPORT
-    scoped_refptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
+    shared_ptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
 extern template class SANDBOX_EXPORT
-    scoped_refptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+    shared_ptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+}  // namespace std
 
 #endif  // SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl_impl.h b/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
index 0064f8a..35ff64f 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
@@ -5,8 +5,9 @@
 #ifndef SANDBOX_LINUX_BPF_DSL_BPF_DSL_IMPL_H_
 #define SANDBOX_LINUX_BPF_DSL_BPF_DSL_IMPL_H_
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "sandbox/linux/bpf_dsl/codegen.h"
 #include "sandbox/sandbox_export.h"
 
@@ -18,7 +19,7 @@
 namespace internal {
 
 // Internal interface implemented by BoolExpr implementations.
-class BoolExprImpl : public base::RefCounted<BoolExprImpl> {
+class BoolExprImpl {
  public:
   // Compile uses |pc| to emit a CodeGen::Node that conditionally continues
   // to either |then_node| or |false_node|, depending on whether the represented
@@ -32,12 +33,11 @@
   virtual ~BoolExprImpl() {}
 
  private:
-  friend class base::RefCounted<BoolExprImpl>;
   DISALLOW_COPY_AND_ASSIGN(BoolExprImpl);
 };
 
 // Internal interface implemented by ResultExpr implementations.
-class ResultExprImpl : public base::RefCounted<ResultExprImpl> {
+class ResultExprImpl {
  public:
   // Compile uses |pc| to emit a CodeGen::Node that executes the
   // represented result expression.
@@ -58,7 +58,6 @@
   virtual ~ResultExprImpl() {}
 
  private:
-  friend class base::RefCounted<ResultExprImpl>;
   DISALLOW_COPY_AND_ASSIGN(ResultExprImpl);
 };
 
diff --git a/sandbox/linux/bpf_dsl/codegen.cc b/sandbox/linux/bpf_dsl/codegen.cc
index 647f55a..d88bd53 100644
--- a/sandbox/linux/bpf_dsl/codegen.cc
+++ b/sandbox/linux/bpf_dsl/codegen.cc
@@ -144,18 +144,4 @@
   return (program_.size() - 1) - target;
 }
 
-// TODO(mdempsky): Move into a general base::Tuple helper library.
-bool CodeGen::MemoKeyLess::operator()(const MemoKey& lhs,
-                                      const MemoKey& rhs) const {
-  if (base::get<0>(lhs) != base::get<0>(rhs))
-    return base::get<0>(lhs) < base::get<0>(rhs);
-  if (base::get<1>(lhs) != base::get<1>(rhs))
-    return base::get<1>(lhs) < base::get<1>(rhs);
-  if (base::get<2>(lhs) != base::get<2>(rhs))
-    return base::get<2>(lhs) < base::get<2>(rhs);
-  if (base::get<3>(lhs) != base::get<3>(rhs))
-    return base::get<3>(lhs) < base::get<3>(rhs);
-  return false;
-}
-
 }  // namespace sandbox
diff --git a/sandbox/linux/bpf_dsl/codegen.h b/sandbox/linux/bpf_dsl/codegen.h
index 03c3b23..3fc3f35 100644
--- a/sandbox/linux/bpf_dsl/codegen.h
+++ b/sandbox/linux/bpf_dsl/codegen.h
@@ -9,10 +9,10 @@
 #include <stdint.h>
 
 #include <map>
+#include <tuple>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/tuple.h"
 #include "sandbox/sandbox_export.h"
 
 struct sock_filter;
@@ -80,10 +80,7 @@
   Program Compile(Node head);
 
  private:
-  using MemoKey = base::Tuple<uint16_t, uint32_t, Node, Node>;
-  struct MemoKeyLess {
-    bool operator()(const MemoKey& lhs, const MemoKey& rhs) const;
-  };
+  using MemoKey = std::tuple<uint16_t, uint32_t, Node, Node>;
 
   // AppendInstruction adds a new instruction, ensuring that |jt| and
   // |jf| are within range as necessary for |code|.
@@ -112,7 +109,7 @@
   // if it's an unconditional jump to a node semantically-equivalent to N.
   std::vector<Node> equivalent_;
 
-  std::map<MemoKey, Node, MemoKeyLess> memos_;
+  std::map<MemoKey, Node> memos_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGen);
 };
diff --git a/sandbox/linux/bpf_dsl/cons.h b/sandbox/linux/bpf_dsl/cons.h
index be050f7..07ac3df 100644
--- a/sandbox/linux/bpf_dsl/cons.h
+++ b/sandbox/linux/bpf_dsl/cons.h
@@ -5,8 +5,9 @@
 #ifndef SANDBOX_LINUX_BPF_DSL_CONS_H_
 #define SANDBOX_LINUX_BPF_DSL_CONS_H_
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "sandbox/sandbox_export.h"
 
 namespace sandbox {
@@ -60,19 +61,19 @@
 
 // List represents a (possibly null) pointer to a cons cell.
 template <typename T>
-using List = scoped_refptr<const Cell<T>>;
+using List = std::shared_ptr<const Cell<T>>;
 
 // Cons extends a cons list by prepending a new value to the front.
 template <typename T>
-List<T> Cons(const T& head, const List<T>& tail) {
-  return List<T>(new const Cell<T>(head, tail));
+List<T> Cons(const T& head, List<T> tail) {
+  return std::make_shared<Cell<T>>(head, std::move(tail));
 }
 
 // Cell represents an individual "cons cell" within a cons list.
 template <typename T>
-class Cell : public base::RefCounted<Cell<T>> {
+class Cell {
  public:
-  Cell(const T& head, const List<T>& tail) : head_(head), tail_(tail) {}
+  Cell(const T& head, List<T> tail) : head_(head), tail_(std::move(tail)) {}
 
   // Head returns this cell's head element.
   const T& head() const { return head_; }
@@ -81,12 +82,9 @@
   const List<T>& tail() const { return tail_; }
 
  private:
-  virtual ~Cell() {}
-
   T head_;
   List<T> tail_;
 
-  friend class base::RefCounted<Cell<T>>;
   DISALLOW_COPY_AND_ASSIGN(Cell);
 };
 
diff --git a/sandbox/linux/sandbox_linux.gypi b/sandbox/linux/sandbox_linux.gypi
index f5b3e0f..e96ae9e 100644
--- a/sandbox/linux/sandbox_linux.gypi
+++ b/sandbox/linux/sandbox_linux.gypi
@@ -376,29 +376,15 @@
     [ 'OS=="android"', {
       'targets': [
       {
-        'target_name': 'sandbox_linux_unittests_stripped',
-        'type': 'none',
-        'dependencies': [ 'sandbox_linux_unittests' ],
-        'actions': [{
-          'action_name': 'strip sandbox_linux_unittests',
-          'inputs': [ '<(PRODUCT_DIR)/sandbox_linux_unittests' ],
-          'outputs': [ '<(PRODUCT_DIR)/sandbox_linux_unittests_stripped' ],
-          'action': [ '<(android_strip)', '<@(_inputs)', '-o', '<@(_outputs)' ],
-        }],
-      },
-      {
         'target_name': 'sandbox_linux_unittests_deps',
         'type': 'none',
         'dependencies': [
-          'sandbox_linux_unittests_stripped',
+          'sandbox_linux_unittests',
         ],
-        # For the component build, ensure dependent shared libraries are
-        # stripped and put alongside sandbox_linux_unittests to simplify pushing
-        # to the device.
         'variables': {
-           'output_dir': '<(PRODUCT_DIR)/sandbox_linux_unittests_deps/',
-           'native_binary': '<(PRODUCT_DIR)/sandbox_linux_unittests_stripped',
-           'include_main_binary': 0,
+           'output_dir': '<(PRODUCT_DIR)/sandbox_linux_unittests__dist/',
+           'native_binary': '<(PRODUCT_DIR)/sandbox_linux_unittests',
+           'include_main_binary': 1,
         },
         'includes': [
           '../../build/android/native_app_dependencies.gypi'
diff --git a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h
index dcf308c..fa40e72 100644
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h
@@ -5,6 +5,8 @@
 #ifndef SANDBOX_LINUX_SECCOMP_BPF_HELPERS_BASELINE_POLICY_H_
 #define SANDBOX_LINUX_SECCOMP_BPF_HELPERS_BASELINE_POLICY_H_
 
+#include <sys/types.h>
+
 #include "base/macros.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
 #include "sandbox/linux/bpf_dsl/policy.h"
diff --git a/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc b/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
index 32dc4d1..f0392b1 100644
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
@@ -248,6 +248,19 @@
 TEST_BASELINE_SIGSYS(__NR_vserver);
 #endif
 
+#if defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
+BPF_TEST_C(BaselinePolicy, FutexEINVAL, BaselinePolicy) {
+  int ops[] = {
+      FUTEX_CMP_REQUEUE_PI, FUTEX_CMP_REQUEUE_PI_PRIVATE,
+      FUTEX_UNLOCK_PI_PRIVATE,
+  };
+
+  for (int op : ops) {
+    BPF_ASSERT_EQ(-1, syscall(__NR_futex, NULL, op, 0, NULL, NULL, 0));
+    BPF_ASSERT_EQ(EINVAL, errno);
+  }
+}
+#else
 BPF_DEATH_TEST_C(BaselinePolicy,
                  FutexWithRequeuePriorityInheritence,
                  DEATH_SEGV_MESSAGE(GetFutexErrorMessageContentForTests()),
@@ -271,6 +284,7 @@
   syscall(__NR_futex, NULL, FUTEX_UNLOCK_PI_PRIVATE, 0, NULL, NULL, 0);
   _exit(1);
 }
+#endif  // defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
 
 BPF_TEST_C(BaselinePolicy, PrctlDumpable, BaselinePolicy) {
   const int is_dumpable = prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
index 4b98366..56c4cb3 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
@@ -47,9 +47,8 @@
 #define PR_SET_VMA 0x53564d41
 #endif
 
-// https://android.googlesource.com/platform/system/core/+/lollipop-release/libcutils/sched_policy.c
-#if !defined(PR_SET_TIMERSLACK_PID)
-#define PR_SET_TIMERSLACK_PID 41
+#ifndef PR_SET_PTRACER
+#define PR_SET_PTRACER 0x59616d61
 #endif
 
 #endif  // defined(OS_ANDROID)
@@ -95,6 +94,18 @@
 #endif
 }
 
+// Ubuntu's version of glibc has a race condition in sem_post that can cause
+// it to call futex(2) with bogus op arguments. To workaround this, we need
+// to allow those futex(2) calls to fail with EINVAL, instead of crashing the
+// process. See crbug.com/598471.
+inline bool IsBuggyGlibcSemPost() {
+#if defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
+  return true;
+#else
+  return false;
+#endif
+}
+
 }  // namespace.
 
 #define CASES SANDBOX_BPF_DSL_CASES
@@ -142,9 +153,35 @@
   return Switch(option)
       .CASES((PR_GET_NAME, PR_SET_NAME, PR_GET_DUMPABLE, PR_SET_DUMPABLE
 #if defined(OS_ANDROID)
-              ,
-              PR_SET_VMA, PR_SET_TIMERSLACK_PID
-#endif
+              , PR_SET_VMA, PR_SET_PTRACER
+
+// Enable PR_SET_TIMERSLACK_PID, an Android custom prctl which is used in:
+// https://android.googlesource.com/platform/system/core/+/lollipop-release/libcutils/sched_policy.c.
+// Depending on the Android kernel version, this prctl may have different
+// values. Since we don't know the correct value for the running kernel, we must
+// allow them all.
+//
+// The effect is:
+// On 3.14 kernels, this allows PR_SET_TIMERSLACK_PID and 43 and 127 (invalid
+// prctls which will return EINVAL)
+// On 3.18 kernels, this allows PR_SET_TIMERSLACK_PID, PR_SET_THP_DISABLE, and
+// 127 (invalid).
+// On 4.1 kernels and up, this allows PR_SET_TIMERSLACK_PID, PR_SET_THP_DISABLE,
+// and PR_MPX_ENABLE_MANAGEMENT.
+
+// https://android.googlesource.com/kernel/common/+/android-3.14/include/uapi/linux/prctl.h
+#define PR_SET_TIMERSLACK_PID_1 41
+
+// https://android.googlesource.com/kernel/common/+/android-3.18/include/uapi/linux/prctl.h
+#define PR_SET_TIMERSLACK_PID_2 43
+
+// https://android.googlesource.com/kernel/common/+/android-4.1/include/uapi/linux/prctl.h and up
+#define PR_SET_TIMERSLACK_PID_3 127
+
+              , PR_SET_TIMERSLACK_PID_1
+              , PR_SET_TIMERSLACK_PID_2
+              , PR_SET_TIMERSLACK_PID_3
+#endif  // defined(OS_ANDROID)
               ),
              Allow())
       .Default(CrashSIGSYSPrctl());
@@ -249,15 +286,10 @@
   const uint64_t kAllowedFutexFlags = FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME;
   const Arg<int> op(1);
   return Switch(op & ~kAllowedFutexFlags)
-      .CASES((FUTEX_WAIT,
-              FUTEX_WAKE,
-              FUTEX_REQUEUE,
-              FUTEX_CMP_REQUEUE,
-              FUTEX_WAKE_OP,
-              FUTEX_WAIT_BITSET,
-              FUTEX_WAKE_BITSET),
+      .CASES((FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, FUTEX_CMP_REQUEUE,
+              FUTEX_WAKE_OP, FUTEX_WAIT_BITSET, FUTEX_WAKE_BITSET),
              Allow())
-      .Default(CrashSIGSYSFutex());
+      .Default(IsBuggyGlibcSemPost() ? Error(EINVAL) : CrashSIGSYSFutex());
 }
 
 ResultExpr RestrictGetSetpriority(pid_t target_pid) {
@@ -305,8 +337,16 @@
   static_assert(4 == sizeof(clockid_t), "clockid_t is not 32bit");
   const Arg<clockid_t> clockid(0);
   return Switch(clockid)
-      .CASES((CLOCK_MONOTONIC, CLOCK_MONOTONIC_COARSE, CLOCK_PROCESS_CPUTIME_ID,
-              CLOCK_REALTIME, CLOCK_REALTIME_COARSE, CLOCK_THREAD_CPUTIME_ID),
+      .CASES((
+#if defined(OS_ANDROID)
+              CLOCK_BOOTTIME,
+#endif
+              CLOCK_MONOTONIC,
+              CLOCK_MONOTONIC_COARSE,
+              CLOCK_PROCESS_CPUTIME_ID,
+              CLOCK_REALTIME,
+              CLOCK_REALTIME_COARSE,
+              CLOCK_THREAD_CPUTIME_ID),
              Allow())
       .Default(CrashSIGSYS());
 }
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
index 280211a..804a8fe 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
@@ -79,6 +79,9 @@
   CheckClock(CLOCK_MONOTONIC);
   CheckClock(CLOCK_MONOTONIC_COARSE);
   CheckClock(CLOCK_PROCESS_CPUTIME_ID);
+#if defined(OS_ANDROID)
+  CheckClock(CLOCK_BOOTTIME);
+#endif
   CheckClock(CLOCK_REALTIME);
   CheckClock(CLOCK_REALTIME_COARSE);
   CheckClock(CLOCK_THREAD_CPUTIME_ID);
@@ -154,7 +157,9 @@
 BPF_TEST_C(ParameterRestrictions,
            sched_getparam_allowed,
            RestrictSchedPolicy) {
-  base::WaitableEvent thread_run(true, false);
+  base::WaitableEvent thread_run(
+      base::WaitableEvent::ResetPolicy::MANUAL,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
   // Run the actual test in a new thread so that the current pid and tid are
   // different.
   base::Thread getparam_thread("sched_getparam_thread");
diff --git a/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h b/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h
index 00d415c..a4315ba 100644
--- a/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h
+++ b/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h
@@ -5,8 +5,9 @@
 #ifndef SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTER_COMPATIBILITY_DELEGATE_H_
 #define SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTER_COMPATIBILITY_DELEGATE_H_
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "sandbox/linux/seccomp-bpf/sandbox_bpf_test_runner.h"
 
 namespace sandbox {
@@ -28,12 +29,12 @@
 
   ~BPFTesterCompatibilityDelegate() override {}
 
-  scoped_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+  std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
     // The current method is guaranteed to only run in the child process
     // running the test. In this process, the current object is guaranteed
     // to live forever. So it's ok to pass aux_pointer_for_policy_ to
     // the policy, which could in turn pass it to the kernel via Trap().
-    return scoped_ptr<bpf_dsl::Policy>(new Policy(&aux_));
+    return std::unique_ptr<bpf_dsl::Policy>(new Policy(&aux_));
   }
 
   void RunTestFunction() override {
diff --git a/sandbox/linux/seccomp-bpf/bpf_tests.h b/sandbox/linux/seccomp-bpf/bpf_tests.h
index cc4debd..8b2b12a 100644
--- a/sandbox/linux/seccomp-bpf/bpf_tests.h
+++ b/sandbox/linux/seccomp-bpf/bpf_tests.h
@@ -5,6 +5,8 @@
 #ifndef SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
 #define SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
 
+#include <memory>
+
 #include "base/logging.h"
 #include "base/macros.h"
 #include "build/build_config.h"
@@ -104,8 +106,8 @@
       : test_function_(test_function) {}
   ~BPFTesterSimpleDelegate() override {}
 
-  scoped_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
-    return scoped_ptr<bpf_dsl::Policy>(new PolicyClass());
+  std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+    return std::unique_ptr<bpf_dsl::Policy>(new PolicyClass());
   }
   void RunTestFunction() override {
     DCHECK(test_function_);
diff --git a/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc b/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc
index e300baf..c16cd72 100644
--- a/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc
+++ b/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc
@@ -10,9 +10,10 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <memory>
+
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "build/build_config.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
 #include "sandbox/linux/bpf_dsl/policy.h"
@@ -76,7 +77,7 @@
   }
   {
     // Test polymorphism.
-    scoped_ptr<BPFTesterDelegate> simple_delegate(
+    std::unique_ptr<BPFTesterDelegate> simple_delegate(
         new BPFTesterCompatibilityDelegate<EmptyClassTakingPolicy, FourtyTwo>(
             DummyTestFunction));
   }
@@ -113,8 +114,8 @@
   BasicBPFTesterDelegate() {}
   ~BasicBPFTesterDelegate() override {}
 
-  scoped_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
-    return scoped_ptr<bpf_dsl::Policy>(new EnosysPtracePolicy());
+  std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+    return std::unique_ptr<bpf_dsl::Policy>(new EnosysPtracePolicy());
   }
   void RunTestFunction() override {
     errno = 0;
diff --git a/sandbox/linux/seccomp-bpf/sandbox_bpf.cc b/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
index 5cf6c2e..4d8d436 100644
--- a/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
+++ b/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
@@ -14,8 +14,8 @@
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
 #include "sandbox/linux/bpf_dsl/codegen.h"
 #include "sandbox/linux/bpf_dsl/policy.h"
@@ -31,7 +31,6 @@
 #include "sandbox/linux/system_headers/linux_filter.h"
 #include "sandbox/linux/system_headers/linux_seccomp.h"
 #include "sandbox/linux/system_headers/linux_syscalls.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
diff --git a/sandbox/linux/seccomp-bpf/sandbox_bpf.h b/sandbox/linux/seccomp-bpf/sandbox_bpf.h
index e758e03..1637b26 100644
--- a/sandbox/linux/seccomp-bpf/sandbox_bpf.h
+++ b/sandbox/linux/seccomp-bpf/sandbox_bpf.h
@@ -7,9 +7,10 @@
 
 #include <stdint.h>
 
+#include <memory>
+
 #include "base/files/scoped_file.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "sandbox/linux/bpf_dsl/codegen.h"
 #include "sandbox/sandbox_export.h"
 
@@ -104,7 +105,7 @@
 
   base::ScopedFD proc_fd_;
   bool sandbox_has_started_;
-  scoped_ptr<bpf_dsl::Policy> policy_;
+  std::unique_ptr<bpf_dsl::Policy> policy_;
 
   DISALLOW_COPY_AND_ASSIGN(SandboxBPF);
 };
diff --git a/sandbox/linux/seccomp-bpf/syscall.cc b/sandbox/linux/seccomp-bpf/syscall.cc
index bc6461f..4d55936 100644
--- a/sandbox/linux/seccomp-bpf/syscall.cc
+++ b/sandbox/linux/seccomp-bpf/syscall.cc
@@ -190,12 +190,15 @@
     "9:.size SyscallAsm, 9b-SyscallAsm\n"
 #elif defined(__mips__)
     ".text\n"
+    ".option pic2\n"
     ".align 4\n"
+    ".global SyscallAsm\n"
     ".type SyscallAsm, @function\n"
     "SyscallAsm:.ent SyscallAsm\n"
     ".frame  $sp, 40, $ra\n"
     ".set   push\n"
     ".set   noreorder\n"
+    ".cpload $t9\n"
     "addiu  $sp, $sp, -40\n"
     "sw     $ra, 36($sp)\n"
     // Check if "v0" is negative. If so, do not attempt to make a
@@ -204,7 +207,11 @@
     // used as a marker that BPF code inspects.
     "bgez   $v0, 1f\n"
     " nop\n"
-    "la     $v0, 2f\n"
+    // This is equivalent to "la $v0, 2f".
+    // LA macro has to be avoided since LLVM-AS has issue with LA in PIC mode
+    // https://llvm.org/bugs/show_bug.cgi?id=27644
+    "lw     $v0, %got(2f)($gp)\n"
+    "addiu  $v0, $v0, %lo(2f)\n"
     "b      2f\n"
     " nop\n"
     // On MIPS first four arguments go to registers a0 - a3 and any
@@ -262,6 +269,10 @@
 extern "C" {
 intptr_t SyscallAsm(intptr_t nr, const intptr_t args[6]);
 }
+#elif defined(__mips__)
+extern "C" {
+intptr_t SyscallAsm(intptr_t nr, const intptr_t args[8]);
+}
 #endif
 
 }  // namespace
@@ -395,20 +406,21 @@
                                     const intptr_t* args,
                                     intptr_t* err_ret) {
   register intptr_t ret __asm__("v0") = nr;
+  register intptr_t syscallasm __asm__("t9") = (intptr_t) &SyscallAsm;
   // a3 register becomes non zero on error.
   register intptr_t err_stat __asm__("a3") = 0;
   {
     register const intptr_t* data __asm__("a0") = args;
     asm volatile(
-        "la $t9, SyscallAsm\n"
         "jalr $t9\n"
         " nop\n"
         : "=r"(ret), "=r"(err_stat)
         : "0"(ret),
-          "r"(data)
+          "r"(data),
+          "r"(syscallasm)
           // a2 is in the clober list so inline assembly can not change its
           // value.
-        : "memory", "ra", "t9", "a2");
+        : "memory", "ra", "a2");
   }
 
   // Set an error status so it can be used outside of this function
diff --git a/sandbox/linux/services/credentials.cc b/sandbox/linux/services/credentials.cc
index 9e57c56..0c617d4 100644
--- a/sandbox/linux/services/credentials.cc
+++ b/sandbox/linux/services/credentials.cc
@@ -16,13 +16,14 @@
 #include <unistd.h>
 
 #include "base/bind.h"
+#include "base/compiler_specific.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/launch.h"
-#include "base/template_util.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "build/build_config.h"
 #include "sandbox/linux/services/namespace_utils.h"
 #include "sandbox/linux/services/proc_util.h"
@@ -30,7 +31,6 @@
 #include "sandbox/linux/services/thread_helpers.h"
 #include "sandbox/linux/system_headers/capability.h"
 #include "sandbox/linux/system_headers/linux_signal.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
@@ -94,9 +94,9 @@
   // /proc/tid directory for the thread (since /proc may not be aware of the
   // PID namespace). With a process, we can just use /proc/self.
   pid_t pid = -1;
-  char stack_buf[PTHREAD_STACK_MIN];
+  char stack_buf[PTHREAD_STACK_MIN] ALIGNAS(16);
 #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
-    defined(ARCH_CPU_MIPS64_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
+    defined(ARCH_CPU_MIPS_FAMILY)
   // The stack grows downward.
   void* stack = stack_buf + sizeof(stack_buf);
 #else
diff --git a/sandbox/linux/services/credentials.h b/sandbox/linux/services/credentials.h
index 095d636..b89a6aa 100644
--- a/sandbox/linux/services/credentials.h
+++ b/sandbox/linux/services/credentials.h
@@ -16,7 +16,6 @@
 
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "sandbox/linux/system_headers/capability.h"
 #include "sandbox/sandbox_export.h"
 
diff --git a/sandbox/linux/services/credentials_unittest.cc b/sandbox/linux/services/credentials_unittest.cc
index d666a0c..b95ba0b 100644
--- a/sandbox/linux/services/credentials_unittest.cc
+++ b/sandbox/linux/services/credentials_unittest.cc
@@ -15,13 +15,13 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "sandbox/linux/services/proc_util.h"
 #include "sandbox/linux/services/syscall_wrappers.h"
 #include "sandbox/linux/system_headers/capability.h"
@@ -40,7 +40,7 @@
 };
 
 // Wrapper to manage libcap2's cap_t type.
-typedef scoped_ptr<typeof(*((cap_t)0)), CapFreeDeleter> ScopedCap;
+typedef std::unique_ptr<typeof(*((cap_t)0)), CapFreeDeleter> ScopedCap;
 
 bool WorkingDirectoryIsRoot() {
   char current_dir[PATH_MAX];
diff --git a/sandbox/linux/services/namespace_sandbox_unittest.cc b/sandbox/linux/services/namespace_sandbox_unittest.cc
index 43e0ae5..c1acca6 100644
--- a/sandbox/linux/services/namespace_sandbox_unittest.cc
+++ b/sandbox/linux/services/namespace_sandbox_unittest.cc
@@ -16,7 +16,6 @@
 #include "base/files/file_enumerator.h"
 #include "base/files/file_path.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/process/launch.h"
 #include "base/process/process.h"
 #include "base/test/multiprocess_test.h"
diff --git a/sandbox/linux/services/namespace_utils.cc b/sandbox/linux/services/namespace_utils.cc
index 2c2b493..97add26 100644
--- a/sandbox/linux/services/namespace_utils.cc
+++ b/sandbox/linux/services/namespace_utils.cc
@@ -20,7 +20,7 @@
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/launch.h"
 #include "base/strings/safe_sprintf.h"
-#include "third_party/valgrind/valgrind.h"
+#include "base/third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
diff --git a/sandbox/linux/services/namespace_utils.h b/sandbox/linux/services/namespace_utils.h
index 7231033..ec5d241 100644
--- a/sandbox/linux/services/namespace_utils.h
+++ b/sandbox/linux/services/namespace_utils.h
@@ -7,9 +7,10 @@
 
 #include <sys/types.h>
 
+#include <type_traits>
+
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/template_util.h"
 #include "sandbox/sandbox_export.h"
 
 namespace sandbox {
@@ -17,7 +18,7 @@
 // Utility functions for using Linux namepaces.
 class SANDBOX_EXPORT NamespaceUtils {
  public:
-  static_assert((base::is_same<uid_t, gid_t>::value),
+  static_assert(std::is_same<uid_t, gid_t>::value,
                 "uid_t and gid_t must be the same type");
   // generic_id_t can be used for either uid_t or gid_t.
   typedef uid_t generic_id_t;
diff --git a/sandbox/linux/services/proc_util.cc b/sandbox/linux/services/proc_util.cc
index 247c29c..b6d58de 100644
--- a/sandbox/linux/services/proc_util.cc
+++ b/sandbox/linux/services/proc_util.cc
@@ -11,8 +11,9 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <memory>
+
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/string_number_conversions.h"
 
@@ -26,7 +27,7 @@
   }
 };
 
-typedef scoped_ptr<DIR, DIRCloser> ScopedDIR;
+typedef std::unique_ptr<DIR, DIRCloser> ScopedDIR;
 
 base::ScopedFD OpenDirectory(const char* path) {
   DCHECK(path);
diff --git a/sandbox/linux/services/syscall_wrappers.cc b/sandbox/linux/services/syscall_wrappers.cc
index 25cd28d..7132d2a 100644
--- a/sandbox/linux/services/syscall_wrappers.cc
+++ b/sandbox/linux/services/syscall_wrappers.cc
@@ -16,11 +16,11 @@
 
 #include "base/compiler_specific.h"
 #include "base/logging.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "build/build_config.h"
 #include "sandbox/linux/system_headers/capability.h"
 #include "sandbox/linux/system_headers/linux_signal.h"
 #include "sandbox/linux/system_headers/linux_syscalls.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
@@ -55,7 +55,7 @@
 #if defined(ARCH_CPU_X86_64)
   return syscall(__NR_clone, flags, child_stack, ptid, ctid, tls);
 #elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
-    defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_MIPS64_FAMILY)
+    defined(ARCH_CPU_MIPS_FAMILY)
   // CONFIG_CLONE_BACKWARDS defined.
   return syscall(__NR_clone, flags, child_stack, ptid, tls, ctid);
 #endif
diff --git a/sandbox/linux/services/syscall_wrappers_unittest.cc b/sandbox/linux/services/syscall_wrappers_unittest.cc
index 5ba5967..34ac740 100644
--- a/sandbox/linux/services/syscall_wrappers_unittest.cc
+++ b/sandbox/linux/services/syscall_wrappers_unittest.cc
@@ -13,12 +13,12 @@
 
 #include "base/logging.h"
 #include "base/posix/eintr_wrapper.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "build/build_config.h"
 #include "sandbox/linux/system_headers/linux_signal.h"
 #include "sandbox/linux/tests/test_utils.h"
 #include "sandbox/linux/tests/unit_tests.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
diff --git a/sandbox/linux/services/thread_helpers_unittests.cc b/sandbox/linux/services/thread_helpers_unittests.cc
index 6dcae0f..fe1080b 100644
--- a/sandbox/linux/services/thread_helpers_unittests.cc
+++ b/sandbox/linux/services/thread_helpers_unittests.cc
@@ -12,7 +12,6 @@
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/process_metrics.h"
 #include "base/threading/platform_thread.h"
diff --git a/sandbox/linux/syscall_broker/broker_host.cc b/sandbox/linux/syscall_broker/broker_host.cc
index 5d9d763..dd61dac 100644
--- a/sandbox/linux/syscall_broker/broker_host.cc
+++ b/sandbox/linux/syscall_broker/broker_host.cc
@@ -22,10 +22,10 @@
 #include "base/pickle.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/posix/unix_domain_socket_linux.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "sandbox/linux/syscall_broker/broker_common.h"
 #include "sandbox/linux/syscall_broker/broker_policy.h"
 #include "sandbox/linux/system_headers/linux_syscalls.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
diff --git a/sandbox/linux/syscall_broker/broker_process.cc b/sandbox/linux/syscall_broker/broker_process.cc
index 5ab8c6c..30713ce 100644
--- a/sandbox/linux/syscall_broker/broker_process.cc
+++ b/sandbox/linux/syscall_broker/broker_process.cc
@@ -19,7 +19,6 @@
 
 #include "base/callback.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/process_metrics.h"
 #include "build/build_config.h"
diff --git a/sandbox/linux/syscall_broker/broker_process.h b/sandbox/linux/syscall_broker/broker_process.h
index 8a512a0..3c0c809 100644
--- a/sandbox/linux/syscall_broker/broker_process.h
+++ b/sandbox/linux/syscall_broker/broker_process.h
@@ -5,12 +5,12 @@
 #ifndef SANDBOX_LINUX_SERVICES_BROKER_PROCESS_H_
 #define SANDBOX_LINUX_SERVICES_BROKER_PROCESS_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/callback_forward.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/pickle.h"
 #include "base/process/process.h"
 #include "sandbox/linux/syscall_broker/broker_policy.h"
@@ -82,7 +82,7 @@
   const bool quiet_failures_for_tests_;
   pid_t broker_pid_;                     // The PID of the broker (child).
   syscall_broker::BrokerPolicy policy_;  // The sandboxing policy.
-  scoped_ptr<syscall_broker::BrokerClient> broker_client_;
+  std::unique_ptr<syscall_broker::BrokerClient> broker_client_;
 
   DISALLOW_COPY_AND_ASSIGN(BrokerProcess);
 };
diff --git a/sandbox/linux/syscall_broker/broker_process_unittest.cc b/sandbox/linux/syscall_broker/broker_process_unittest.cc
index 15e1ffb..229764a 100644
--- a/sandbox/linux/syscall_broker/broker_process_unittest.cc
+++ b/sandbox/linux/syscall_broker/broker_process_unittest.cc
@@ -15,6 +15,7 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -23,7 +24,6 @@
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/posix/unix_domain_socket_linux.h"
 #include "sandbox/linux/syscall_broker/broker_client.h"
@@ -58,7 +58,8 @@
   std::vector<BrokerFilePermission> permissions;
   permissions.push_back(BrokerFilePermission::ReadOnly("/proc/cpuinfo"));
 
-  scoped_ptr<BrokerProcess> open_broker(new BrokerProcess(EPERM, permissions));
+  std::unique_ptr<BrokerProcess> open_broker(
+      new BrokerProcess(EPERM, permissions));
   ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
 
   ASSERT_TRUE(TestUtils::CurrentProcessHasChildren());
@@ -251,7 +252,7 @@
   std::vector<BrokerFilePermission> permissions;
 
   permissions.push_back(BrokerFilePermission::ReadOnlyRecursive("/proc/"));
-  scoped_ptr<BrokerProcess> open_broker(
+  std::unique_ptr<BrokerProcess> open_broker(
       new BrokerProcess(EPERM, permissions, fast_check_in_client));
   ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
   // Open cpuinfo via the broker.
@@ -310,7 +311,7 @@
   else
     permissions.push_back(BrokerFilePermission::ReadOnly(kFileCpuInfo));
 
-  scoped_ptr<BrokerProcess> open_broker(
+  std::unique_ptr<BrokerProcess> open_broker(
       new BrokerProcess(EPERM, permissions, fast_check_in_client));
   ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
 
@@ -489,9 +490,16 @@
   // expected.
 }
 
+#if defined(OS_LINUX)
+// Flaky on Linux NG bots: https://crbug.com/595199.
+#define MAYBE_RecvMsgDescriptorLeak DISABLED_RecvMsgDescriptorLeak
+#else
+#define MAYBE_RecvMsgDescriptorLeak RecvMsgDescriptorLeak
+#endif
+
 // We need to allow noise because the broker will log when it receives our
 // bogus IPCs.
-SANDBOX_TEST_ALLOW_NOISE(BrokerProcess, RecvMsgDescriptorLeak) {
+SANDBOX_TEST_ALLOW_NOISE(BrokerProcess, MAYBE_RecvMsgDescriptorLeak) {
   // Android creates a socket on first use of the LOG call.
   // We need to ensure this socket is open before we
   // begin the test.
diff --git a/sandbox/mac/BUILD.gn b/sandbox/mac/BUILD.gn
index cdaf527..fd53131 100644
--- a/sandbox/mac/BUILD.gn
+++ b/sandbox/mac/BUILD.gn
@@ -5,45 +5,6 @@
 import("//build/config/mac/mac_sdk.gni")
 import("//testing/test.gni")
 
-generate_stubs_script = "//tools/generate_stubs/generate_stubs.py"
-generate_stubs_header = "xpc_stubs_header.fragment"
-generate_stubs_sig_public = "xpc_stubs.sig"
-generate_stubs_sig_private = "xpc_private_stubs.sig"
-generate_stubs_project = "sandbox/mac"
-generate_stubs_output_stem = "xpc_stubs"
-
-action("generate_stubs") {
-  script = generate_stubs_script
-  sources = [
-    generate_stubs_sig_private,
-    generate_stubs_sig_public,
-  ]
-  inputs = [
-    generate_stubs_header,
-  ]
-  outputs = [
-    "$target_gen_dir/$generate_stubs_output_stem.cc",
-    "$target_gen_dir/$generate_stubs_output_stem.h",
-  ]
-  args = [
-    "-i",
-    rebase_path(target_gen_dir, root_build_dir),
-    "-o",
-    rebase_path(target_gen_dir, root_build_dir),
-    "-t",
-    "posix_stubs",
-    "-e",
-    rebase_path(generate_stubs_header, root_build_dir),
-    "-s",
-    generate_stubs_output_stem,
-    "-p",
-    generate_stubs_project,
-    "-x",
-    "SANDBOX_EXPORT",
-  ]
-  args += rebase_path(sources, root_build_dir)
-}
-
 component("sandbox") {
   sources = [
     "bootstrap_sandbox.cc",
@@ -59,7 +20,6 @@
     "policy.h",
     "pre_exec_delegate.cc",
     "pre_exec_delegate.h",
-    "xpc.cc",
     "xpc.h",
     "xpc_message_server.cc",
     "xpc_message_server.h",
@@ -71,15 +31,16 @@
   deps = [
     "//base",
   ]
+}
 
-  # When the build SDK is 10.6, generate a dynamic stub loader. When the
-  # SDK is higher, then libxpc.dylib will be loaded automatically as part
-  # of libSystem, and only forward declarations of private symbols are
-  # necessary.
-  if (mac_sdk_version == "10.6") {
-    deps += [ ":generate_stubs" ]
-    sources += get_target_outputs(":generate_stubs")
-  }
+component("seatbelt") {
+  sources = [
+    "seatbelt.cc",
+    "seatbelt.h",
+    "seatbelt_export.h",
+  ]
+  libs = [ "sandbox" ]
+  defines = [ "SEATBELT_IMPLEMENTATION" ]
 }
 
 test("sandbox_mac_unittests") {
diff --git a/sandbox/mac/sandbox_mac.gypi b/sandbox/mac/sandbox_mac.gypi
index 91ad20b..79740e5 100644
--- a/sandbox/mac/sandbox_mac.gypi
+++ b/sandbox/mac/sandbox_mac.gypi
@@ -5,6 +5,26 @@
 {
   'targets': [
     {
+      'target_name': 'seatbelt',
+      'type' : '<(component)',
+      'sources': [
+        'seatbelt.cc',
+        'seatbelt.h',
+        'seatbelt_export.h',
+      ],
+      'defines': [
+        'SEATBELT_IMPLEMENTATION',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'link_settings': {
+        'libraries': [
+          '$(SDKROOT)/usr/lib/libsandbox.dylib',
+        ],
+      }
+    },
+    {
       'target_name': 'sandbox',
       'type': '<(component)',
       'sources': [
@@ -21,7 +41,6 @@
         'policy.h',
         'pre_exec_delegate.cc',
         'pre_exec_delegate.h',
-        'xpc.cc',
         'xpc.h',
         'xpc_message_server.cc',
         'xpc_message_server.h',
@@ -41,52 +60,6 @@
           '$(SDKROOT)/usr/lib/libbsm.dylib',
         ],
       },
-      'conditions': [
-        # When the build SDK is 10.6, generate a dynamic stub loader. When the
-        # SDK is higher, then libxpc.dylib will be loaded automatically as part
-        # of libSystem, and only forward declarations of private symbols are
-        # necessary.
-        ['mac_sdk == "10.6"', {
-          'actions': [
-            {
-              'variables': {
-                'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
-                'generate_stubs_header_path': 'xpc_stubs_header.fragment',
-                'generate_stubs_sig_public_path': 'xpc_stubs.sig',
-                'generate_stubs_sig_private_path': 'xpc_private_stubs.sig',
-                'generate_stubs_project': 'sandbox/mac',
-                'generate_stubs_output_stem': 'xpc_stubs',
-              },
-              'action_name': 'generate_stubs',
-              'inputs': [
-                '<(generate_stubs_script)',
-                '<(generate_stubs_header_path)',
-                '<(generate_stubs_sig_public_path)',
-                '<(generate_stubs_sig_private_path)',
-              ],
-              'outputs': [
-                '<(INTERMEDIATE_DIR)/<(generate_stubs_output_stem).cc',
-                '<(SHARED_INTERMEDIATE_DIR)/<(generate_stubs_project)/<(generate_stubs_output_stem).h',
-              ],
-              'action': [
-                'python',
-                '<(generate_stubs_script)',
-                '-i', '<(INTERMEDIATE_DIR)',
-                '-o', '<(SHARED_INTERMEDIATE_DIR)/<(generate_stubs_project)',
-                '-t', 'posix_stubs',
-                '-e', '<(generate_stubs_header_path)',
-                '-s', '<(generate_stubs_output_stem)',
-                '-p', '<(generate_stubs_project)',
-                '-x', 'SANDBOX_EXPORT',
-                '<(generate_stubs_sig_public_path)',
-                '<(generate_stubs_sig_private_path)',
-              ],
-              'process_outputs_as_sources': 1,
-              'message': 'Generating XPC stubs for 10.6 compatability.',
-            },
-          ],
-        }],
-      ],
     },
     {
       'target_name': 'sandbox_mac_unittests',
diff --git a/sandbox/mac/xpc_private_stubs.sig b/sandbox/mac/xpc_private_stubs.sig
deleted file mode 100644
index b8e1c50..0000000
--- a/sandbox/mac/xpc_private_stubs.sig
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains declarations of private XPC functions. This file is
-// used for both forward declarations of private symbols and to use with
-// tools/generate_stubs for creating a dynamic library loader.
-
-// Dictionary manipulation.
-void xpc_dictionary_set_mach_send(xpc_object_t dictionary, const char* name, mach_port_t port);
-void xpc_dictionary_get_audit_token(xpc_object_t dictionary, audit_token_t* token);
-
-// Raw object getters.
-mach_port_t xpc_mach_send_get_right(xpc_object_t value);
-
-// Pipe methods.
-xpc_pipe_t xpc_pipe_create_from_port(mach_port_t port, int flags);
-int xpc_pipe_receive(mach_port_t port, xpc_object_t* message);
-int xpc_pipe_routine(xpc_pipe_t pipe, xpc_object_t request, xpc_object_t* reply);
-int xpc_pipe_routine_reply(xpc_object_t reply);
-int xpc_pipe_simpleroutine(xpc_pipe_t pipe, xpc_object_t message);
-int xpc_pipe_routine_forward(xpc_pipe_t forward_to, xpc_object_t request);
diff --git a/sandbox/mac/xpc_stubs.sig b/sandbox/mac/xpc_stubs.sig
deleted file mode 100644
index b8e7699..0000000
--- a/sandbox/mac/xpc_stubs.sig
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains declarations of public XPC functions used in the sandbox.
-// This file is used with tools/generate_stubs for creating a dynamic library
-// loader.
-
-// XPC object management.
-void xpc_release(xpc_object_t object);
-
-// Dictionary manipulation.
-xpc_object_t xpc_dictionary_create(const char* const *keys, const xpc_object_t* values, size_t count);
-const char* xpc_dictionary_get_string(xpc_object_t dictionary, const char* key);
-uint64_t xpc_dictionary_get_uint64(xpc_object_t dictionary, const char* key);
-void xpc_dictionary_set_uint64(xpc_object_t dictionary, const char* key, uint64_t value);
-int64_t xpc_dictionary_get_int64(xpc_object_t dictionary, const char* key);
-void xpc_dictionary_set_int64(xpc_object_t dictionary, const char* key, int64_t value);
-bool xpc_dictionary_get_bool(xpc_object_t dictionary, const char* key);
-xpc_object_t xpc_dictionary_create_reply(xpc_object_t request);
diff --git a/sandbox/mac/xpc_stubs_header.fragment b/sandbox/mac/xpc_stubs_header.fragment
deleted file mode 100644
index 2aa81cc..0000000
--- a/sandbox/mac/xpc_stubs_header.fragment
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SANDBOX_MAC_XPC_STUBS_HEADER_FRAGMENT_
-#define SANDBOX_MAC_XPC_STUBS_HEADER_FRAGMENT_
-
-#include <bsm/libbsm.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include "sandbox/sandbox_export.h"
-
-// Declare or include public types.
-#if !defined(MAC_OS_X_VERSION_10_7) || \
-    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
-
-extern "C" {
-typedef void* xpc_object_t;
-}  // extern "C"
-
-#else
-
-#include <xpc/xpc.h>
-
-#endif
-
-// Declare private types.
-extern "C" {
-typedef struct _xpc_pipe_s* xpc_pipe_t;
-}  // extern "C"
-
-#if defined(MAC_OS_X_VERSION_10_7) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-// Redeclare methods that only exist on 10.7+ to suppress
-// -Wpartial-availability warnings.
-extern "C" {
-XPC_EXPORT XPC_NONNULL1 XPC_NONNULL2 void
-xpc_dictionary_set_int64(xpc_object_t xdict, const char* key, int64_t value);
-
-XPC_EXPORT XPC_NONNULL1 void xpc_release(xpc_object_t object);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL_ALL
-bool xpc_dictionary_get_bool(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL_ALL int64_t
-xpc_dictionary_get_int64(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL_ALL
-const char* xpc_dictionary_get_string(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL_ALL uint64_t
-xpc_dictionary_get_uint64(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_NONNULL1 XPC_NONNULL2 void
-xpc_dictionary_set_uint64(xpc_object_t xdict, const char* key, uint64_t value);
-
-XPC_EXPORT XPC_NONNULL1 XPC_NONNULL2
-void xpc_dictionary_set_string(xpc_object_t xdict, const char* key,
-                               const char* string);
-
-XPC_EXPORT XPC_MALLOC XPC_RETURNS_RETAINED XPC_WARN_RESULT xpc_object_t
-xpc_dictionary_create(const char* const* keys,
-                      const xpc_object_t* values,
-                      size_t count);
-XPC_EXPORT XPC_MALLOC XPC_RETURNS_RETAINED XPC_WARN_RESULT XPC_NONNULL_ALL
-    xpc_object_t
-    xpc_dictionary_create_reply(xpc_object_t original);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL1 XPC_NONNULL2
-xpc_object_t xpc_dictionary_get_value(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_MALLOC XPC_WARN_RESULT XPC_NONNULL1
-char* xpc_copy_description(xpc_object_t object);
-}  // extern "C"
-#endif
-
-#endif  // SANDBOX_MAC_XPC_STUBS_HEADER_FRAGMENT_
diff --git a/sandbox/win/BUILD.gn b/sandbox/win/BUILD.gn
index 327396b..60bb499 100644
--- a/sandbox/win/BUILD.gn
+++ b/sandbox/win/BUILD.gn
@@ -12,8 +12,6 @@
   sources = [
     "src/acl.cc",
     "src/acl.h",
-    "src/app_container.cc",
-    "src/app_container.h",
     "src/broker_services.cc",
     "src/broker_services.h",
     "src/crosscall_client.h",
@@ -32,12 +30,6 @@
     "src/handle_closer.h",
     "src/handle_closer_agent.cc",
     "src/handle_closer_agent.h",
-    "src/handle_dispatcher.cc",
-    "src/handle_dispatcher.h",
-    "src/handle_interception.cc",
-    "src/handle_interception.h",
-    "src/handle_policy.cc",
-    "src/handle_policy.h",
     "src/interception.cc",
     "src/interception.h",
     "src/interception_agent.cc",
@@ -141,7 +133,6 @@
 
   if (current_cpu == "x64") {
     sources += [
-      "src/Wow64_64.cc",
       "src/interceptors_64.cc",
       "src/interceptors_64.h",
       "src/resolver_64.cc",
@@ -149,8 +140,6 @@
     ]
   } else if (current_cpu == "x86") {
     sources += [
-      "src/Wow64.cc",
-      "src/Wow64.h",
       "src/resolver_32.cc",
       "src/service_resolver_32.cc",
       "src/sidestep/ia32_modrm_map.cpp",
@@ -199,7 +188,6 @@
     "src/file_policy_test.cc",
     "src/handle_closer_test.cc",
     "src/handle_inheritance_test.cc",
-    "src/handle_policy_test.cc",
     "src/integrity_level_test.cc",
     "src/ipc_ping_test.cc",
     "src/lpc_policy_test.cc",
@@ -208,6 +196,7 @@
     "src/process_mitigations_test.cc",
     "src/process_policy_test.cc",
     "src/registry_policy_test.cc",
+    "src/restricted_token_test.cc",
     "src/sync_policy_test.cc",
     "src/sync_policy_test.h",
     "src/unload_dll_test.cc",
@@ -216,14 +205,36 @@
     "tests/common/test_utils.cc",
     "tests/common/test_utils.h",
     "tests/integration_tests/integration_tests.cc",
+    "tests/integration_tests/integration_tests_common.h",
     "tests/integration_tests/integration_tests_test.cc",
   ]
 
   deps = [
     ":sandbox",
+    ":sbox_integration_test_hook_dll",
+    ":sbox_integration_test_win_proc",
     "//base/test:test_support",
     "//testing/gtest",
   ]
+
+  libs = [ "dxva2.lib" ]
+}
+
+loadable_module("sbox_integration_test_hook_dll") {
+  sources = [
+    "tests/integration_tests/hooking_dll.cc",
+    "tests/integration_tests/integration_tests_common.h",
+  ]
+}
+
+executable("sbox_integration_test_win_proc") {
+  sources = [
+    "tests/integration_tests/hooking_win_proc.cc",
+    "tests/integration_tests/integration_tests_common.h",
+  ]
+
+  configs -= [ "//build/config/win:console" ]
+  configs += [ "//build/config/win:windowed" ]
 }
 
 test("sbox_validation_tests") {
@@ -241,11 +252,12 @@
     "//base/test:test_support",
     "//testing/gtest",
   ]
+
+  libs = [ "shlwapi.lib" ]
 }
 
 test("sbox_unittests") {
   sources = [
-    "src/app_container_unittest.cc",
     "src/interception_unittest.cc",
     "src/ipc_unittest.cc",
     "src/job_unittest.cc",
diff --git a/sandbox/win/OWNERS b/sandbox/win/OWNERS
index 85047f7..54a76c1 100644
--- a/sandbox/win/OWNERS
+++ b/sandbox/win/OWNERS
@@ -1,3 +1,4 @@
 cpu@chromium.org
+forshaw@chromium.org
 jschuh@chromium.org
 wfh@chromium.org
diff --git a/sandbox/win/sandbox_win.gypi b/sandbox/win/sandbox_win.gypi
index f0d275a..e9673aa 100644
--- a/sandbox/win/sandbox_win.gypi
+++ b/sandbox/win/sandbox_win.gypi
@@ -15,8 +15,6 @@
         'sources': [
             'src/acl.cc',
             'src/acl.h',
-            'src/app_container.cc',
-            'src/app_container.h',
             'src/broker_services.cc',
             'src/broker_services.h',
             'src/crosscall_client.h',
@@ -35,12 +33,6 @@
             'src/handle_closer.h',
             'src/handle_closer_agent.cc',
             'src/handle_closer_agent.h',
-            'src/handle_dispatcher.cc',
-            'src/handle_dispatcher.h',
-            'src/handle_interception.cc',
-            'src/handle_interception.h',
-            'src/handle_policy.cc',
-            'src/handle_policy.h',
             'src/interception.cc',
             'src/interception.h',
             'src/interception_agent.cc',
@@ -148,7 +140,6 @@
               'src/interceptors_64.h',
               'src/resolver_64.cc',
               'src/service_resolver_64.cc',
-              'src/Wow64_64.cc',
             ],
           }],
           ['target_arch=="ia32"', {
@@ -164,8 +155,6 @@
               'src/sidestep\mini_disassembler.h',
               'src/sidestep\preamble_patcher_with_stub.cpp',
               'src/sidestep\preamble_patcher.h',
-              'src/Wow64.cc',
-              'src/Wow64.h',
             ],
           }],
         ],
@@ -208,6 +197,8 @@
       'type': 'executable',
       'dependencies': [
         'sandbox',
+        'sbox_integration_test_hook_dll',
+        'sbox_integration_test_win_proc',
         '../base/base.gyp:test_support_base',
         '../testing/gtest.gyp:gtest',
       ],
@@ -216,7 +207,6 @@
         'src/app_container_test.cc',
         'src/file_policy_test.cc',
         'src/handle_inheritance_test.cc',
-        'src/handle_policy_test.cc',
         'tests/integration_tests/integration_tests_test.cc',
         'src/handle_closer_test.cc',
         'src/integrity_level_test.cc',
@@ -227,6 +217,7 @@
         'src/process_mitigations_test.cc',
         'src/process_policy_test.cc',
         'src/registry_policy_test.cc',
+        'src/restricted_token_test.cc',
         'src/sync_policy_test.cc',
         'src/sync_policy_test.h',
         'src/unload_dll_test.cc',
@@ -235,7 +226,38 @@
         'tests/common/test_utils.cc',
         'tests/common/test_utils.h',
         'tests/integration_tests/integration_tests.cc',
+        'tests/integration_tests/integration_tests_common.h',
       ],
+      'link_settings': {
+        'libraries': [
+          '-ldxva2.lib',
+        ],
+      },
+    },
+    {
+      'target_name': 'sbox_integration_test_hook_dll',
+      'type': 'shared_library',
+      'dependencies': [
+      ],
+      'sources': [
+        'tests/integration_tests/hooking_dll.cc',
+        'tests/integration_tests/integration_tests_common.h',
+      ],
+    },
+    {
+      'target_name': 'sbox_integration_test_win_proc',
+      'type': 'executable',
+      'dependencies': [
+      ],
+      'sources': [
+        'tests/integration_tests/hooking_win_proc.cc',
+        'tests/integration_tests/integration_tests_common.h',
+      ],
+      'msvs_settings': {
+        'VCLinkerTool': {
+          'SubSystem': '2',  # Set /SUBSYSTEM:WINDOWS
+        },
+      },
     },
     {
       'target_name': 'sbox_validation_tests',
@@ -253,6 +275,11 @@
         'tests/validation_tests/commands.h',
         'tests/validation_tests/suite.cc',
       ],
+      'link_settings': {
+        'libraries': [
+          '-lshlwapi.lib',
+        ],
+      },
     },
     {
       'target_name': 'sbox_unittests',
@@ -263,7 +290,6 @@
         '../testing/gtest.gyp:gtest',
       ],
       'sources': [
-        'src/app_container_unittest.cc',
         'src/interception_unittest.cc',
         'src/service_resolver_unittest.cc',
         'src/restricted_token_unittest.cc',
diff --git a/sandbox/win/src/interceptors.h b/sandbox/win/src/interceptors.h
index a17447a..44b34e3 100644
--- a/sandbox/win/src/interceptors.h
+++ b/sandbox/win/src/interceptors.h
@@ -19,7 +19,7 @@
   SET_INFORMATION_THREAD_ID,
   OPEN_THREAD_TOKEN_ID,
   OPEN_THREAD_TOKEN_EX_ID,
-  OPEN_TREAD_ID,
+  OPEN_THREAD_ID,
   OPEN_PROCESS_ID,
   OPEN_PROCESS_TOKEN_ID,
   OPEN_PROCESS_TOKEN_EX_ID,
@@ -34,6 +34,7 @@
   // Process-thread dispatcher:
   CREATE_PROCESSW_ID,
   CREATE_PROCESSA_ID,
+  CREATE_THREAD_ID,
   // Registry dispatcher:
   CREATE_KEY_ID,
   OPEN_KEY_ID,
@@ -45,6 +46,21 @@
   GDIINITIALIZE_ID,
   GETSTOCKOBJECT_ID,
   REGISTERCLASSW_ID,
+  ENUMDISPLAYMONITORS_ID,
+  ENUMDISPLAYDEVICESA_ID,
+  GETMONITORINFOA_ID,
+  GETMONITORINFOW_ID,
+  CREATEOPMPROTECTEDOUTPUTS_ID,
+  GETCERTIFICATE_ID,
+  GETCERTIFICATESIZE_ID,
+  GETCERTIFICATEBYHANDLE_ID,
+  GETCERTIFICATESIZEBYHANDLE_ID,
+  DESTROYOPMPROTECTEDOUTPUT_ID,
+  CONFIGUREOPMPROTECTEDOUTPUT_ID,
+  GETOPMINFORMATION_ID,
+  GETOPMRANDOMNUMBER_ID,
+  GETSUGGESTEDOPMPROTECTEDOUTPUTARRAYSIZE_ID,
+  SETOPMSIGNINGKEYANDSEQUENCENUMBERS_ID,
   INTERCEPTOR_MAX_ID
 };
 
diff --git a/sandbox/win/src/ipc_tags.h b/sandbox/win/src/ipc_tags.h
index d680411..1c754cd 100644
--- a/sandbox/win/src/ipc_tags.h
+++ b/sandbox/win/src/ipc_tags.h
@@ -28,10 +28,22 @@
   IPC_OPENEVENT_TAG,
   IPC_NTCREATEKEY_TAG,
   IPC_NTOPENKEY_TAG,
-  IPC_DUPLICATEHANDLEPROXY_TAG,
   IPC_GDI_GDIDLLINITIALIZE_TAG,
   IPC_GDI_GETSTOCKOBJECT_TAG,
   IPC_USER_REGISTERCLASSW_TAG,
+  IPC_CREATETHREAD_TAG,
+  IPC_USER_ENUMDISPLAYMONITORS_TAG,
+  IPC_USER_ENUMDISPLAYDEVICES_TAG,
+  IPC_USER_GETMONITORINFO_TAG,
+  IPC_GDI_CREATEOPMPROTECTEDOUTPUTS_TAG,
+  IPC_GDI_GETCERTIFICATE_TAG,
+  IPC_GDI_GETCERTIFICATESIZE_TAG,
+  IPC_GDI_DESTROYOPMPROTECTEDOUTPUT_TAG,
+  IPC_GDI_CONFIGUREOPMPROTECTEDOUTPUT_TAG,
+  IPC_GDI_GETOPMINFORMATION_TAG,
+  IPC_GDI_GETOPMRANDOMNUMBER_TAG,
+  IPC_GDI_GETSUGGESTEDOPMPROTECTEDOUTPUTARRAYSIZE_TAG,
+  IPC_GDI_SETOPMSIGNINGKEYANDSEQUENCENUMBERS_TAG,
   IPC_LAST_TAG
 };
 
diff --git a/sandbox/win/src/nt_internals.h b/sandbox/win/src/nt_internals.h
index 2a39d5b..6469c2b 100644
--- a/sandbox/win/src/nt_internals.h
+++ b/sandbox/win/src/nt_internals.h
@@ -30,6 +30,7 @@
 #define STATUS_PROCEDURE_NOT_FOUND    ((NTSTATUS)0xC000007AL)
 #define STATUS_INVALID_IMAGE_FORMAT   ((NTSTATUS)0xC000007BL)
 #define STATUS_NO_TOKEN               ((NTSTATUS)0xC000007CL)
+#define STATUS_NOT_SUPPORTED          ((NTSTATUS)0xC00000BBL)
 
 #define CURRENT_PROCESS ((HANDLE) -1)
 #define CURRENT_THREAD  ((HANDLE) -2)
@@ -332,18 +333,18 @@
   };
 } PROCESS_BASIC_INFORMATION, *PPROCESS_BASIC_INFORMATION;
 
-typedef NTSTATUS (WINAPI *NtQueryInformationProcessFunction)(
-  IN HANDLE ProcessHandle,
-  IN PROCESSINFOCLASS ProcessInformationClass,
-  OUT PVOID ProcessInformation,
-  IN ULONG ProcessInformationLength,
-  OUT PULONG ReturnLength OPTIONAL);
+typedef NTSTATUS(WINAPI* NtQueryInformationProcessFunction)(
+    IN HANDLE ProcessHandle,
+    IN PROCESSINFOCLASS ProcessInformationClass,
+    OUT PVOID ProcessInformation,
+    IN ULONG ProcessInformationLength,
+    OUT PULONG ReturnLength OPTIONAL);
 
-typedef NTSTATUS (WINAPI *NtSetInformationProcessFunction)(
-  HANDLE ProcessHandle,
-  IN PROCESSINFOCLASS ProcessInformationClass,
-  IN PVOID ProcessInformation,
-  IN ULONG ProcessInformationLength);
+typedef NTSTATUS(WINAPI* NtSetInformationProcessFunction)(
+    HANDLE ProcessHandle,
+    IN PROCESSINFOCLASS ProcessInformationClass,
+    IN PVOID ProcessInformation,
+    IN ULONG ProcessInformationLength);
 
 typedef NTSTATUS (WINAPI *NtOpenThreadTokenFunction) (
   IN HANDLE ThreadHandle,
@@ -369,21 +370,50 @@
   IN ULONG HandleAttributes,
   OUT PHANDLE TokenHandle);
 
-typedef NTSTATUS (WINAPI * RtlCreateUserThreadFunction)(
-  IN HANDLE Process,
-  IN PSECURITY_DESCRIPTOR ThreadSecurityDescriptor,
-  IN BOOLEAN CreateSuspended,
-  IN ULONG ZeroBits,
-  IN SIZE_T MaximumStackSize,
-  IN SIZE_T CommittedStackSize,
-  IN LPTHREAD_START_ROUTINE StartAddress,
-  IN PVOID Parameter,
-  OUT PHANDLE Thread,
-  OUT PCLIENT_ID ClientId);
+typedef NTSTATUS(WINAPI* NtQueryInformationTokenFunction)(
+    IN HANDLE TokenHandle,
+    IN TOKEN_INFORMATION_CLASS TokenInformationClass,
+    OUT PVOID TokenInformation,
+    IN ULONG TokenInformationLength,
+    OUT PULONG ReturnLength);
+
+typedef NTSTATUS(WINAPI* RtlCreateUserThreadFunction)(
+    IN HANDLE Process,
+    IN PSECURITY_DESCRIPTOR ThreadSecurityDescriptor,
+    IN BOOLEAN CreateSuspended,
+    IN ULONG ZeroBits,
+    IN SIZE_T MaximumStackSize,
+    IN SIZE_T CommittedStackSize,
+    IN LPTHREAD_START_ROUTINE StartAddress,
+    IN PVOID Parameter,
+    OUT PHANDLE Thread,
+    OUT PCLIENT_ID ClientId);
+
+typedef NTSTATUS(WINAPI* RtlConvertSidToUnicodeStringFunction)(
+    OUT PUNICODE_STRING UnicodeString,
+    IN PSID Sid,
+    IN BOOLEAN AllocateDestinationString);
+
+typedef VOID(WINAPI* RtlFreeUnicodeStringFunction)(
+    IN OUT PUNICODE_STRING UnicodeString);
 
 // -----------------------------------------------------------------------
 // Registry
 
+typedef enum _KEY_VALUE_INFORMATION_CLASS {
+  KeyValueFullInformation = 1
+} KEY_VALUE_INFORMATION_CLASS,
+    *PKEY_VALUE_INFORMATION_CLASS;
+
+typedef struct _KEY_VALUE_FULL_INFORMATION {
+  ULONG TitleIndex;
+  ULONG Type;
+  ULONG DataOffset;
+  ULONG DataLength;
+  ULONG NameLength;
+  WCHAR Name[1];
+} KEY_VALUE_FULL_INFORMATION, *PKEY_VALUE_FULL_INFORMATION;
+
 typedef NTSTATUS (WINAPI *NtCreateKeyFunction)(
   OUT PHANDLE KeyHandle,
   IN ACCESS_MASK DesiredAccess,
@@ -407,6 +437,24 @@
 typedef NTSTATUS (WINAPI *NtDeleteKeyFunction)(
   IN HANDLE KeyHandle);
 
+typedef NTSTATUS(WINAPI* RtlFormatCurrentUserKeyPathFunction)(
+    OUT PUNICODE_STRING RegistryPath);
+
+typedef NTSTATUS(WINAPI* NtQueryValueKeyFunction)(IN HANDLE KeyHandle,
+                                                  IN PUNICODE_STRING ValueName,
+                                                  IN KEY_VALUE_INFORMATION_CLASS
+                                                      KeyValueInformationClass,
+                                                  OUT PVOID KeyValueInformation,
+                                                  IN ULONG Length,
+                                                  OUT PULONG ResultLength);
+
+typedef NTSTATUS(WINAPI* NtSetValueKeyFunction)(IN HANDLE KeyHandle,
+                                                IN PUNICODE_STRING ValueName,
+                                                IN ULONG TitleIndex OPTIONAL,
+                                                IN ULONG Type,
+                                                IN PVOID Data,
+                                                IN ULONG DataSize);
+
 // -----------------------------------------------------------------------
 // Memory
 
@@ -644,6 +692,8 @@
   IN OUT PUNICODE_STRING DestinationString,
   IN PCWSTR SourceString);
 
+typedef ULONG (WINAPI* RtlNtStatusToDosErrorFunction)(NTSTATUS status);
+
 typedef enum _EVENT_TYPE {
   NotificationEvent,
   SynchronizationEvent
@@ -699,5 +749,164 @@
 
 const unsigned int NtProcessInformationAccessToken = 9;
 
+// -----------------------------------------------------------------------
+// GDI OPM API and Supported Calls
+
+#define DXGKMDT_OPM_OMAC_SIZE 16
+#define DXGKMDT_OPM_128_BIT_RANDOM_NUMBER_SIZE 16
+#define DXGKMDT_OPM_ENCRYPTED_PARAMETERS_SIZE 256
+#define DXGKMDT_OPM_CONFIGURE_SETTING_DATA_SIZE 4056
+#define DXGKMDT_OPM_GET_INFORMATION_PARAMETERS_SIZE 4056
+#define DXGKMDT_OPM_REQUESTED_INFORMATION_SIZE 4076
+#define DXGKMDT_OPM_HDCP_KEY_SELECTION_VECTOR_SIZE 5
+#define DXGKMDT_OPM_PROTECTION_TYPE_SIZE 4
+
+enum DXGKMDT_CERTIFICATE_TYPE {
+  DXGKMDT_OPM_CERTIFICATE = 0,
+  DXGKMDT_COPP_CERTIFICATE = 1,
+  DXGKMDT_UAB_CERTIFICATE = 2,
+  DXGKMDT_FORCE_ULONG = 0xFFFFFFFF
+};
+
+enum DXGKMDT_OPM_VIDEO_OUTPUT_SEMANTICS {
+  DXGKMDT_OPM_VOS_COPP_SEMANTICS = 0,
+  DXGKMDT_OPM_VOS_OPM_SEMANTICS = 1
+};
+
+enum DXGKMDT_DPCP_PROTECTION_LEVEL {
+  DXGKMDT_OPM_DPCP_OFF = 0,
+  DXGKMDT_OPM_DPCP_ON = 1,
+  DXGKMDT_OPM_DPCP_FORCE_ULONG = 0x7fffffff
+};
+
+enum DXGKMDT_OPM_HDCP_PROTECTION_LEVEL {
+  DXGKMDT_OPM_HDCP_OFF = 0,
+  DXGKMDT_OPM_HDCP_ON = 1,
+  DXGKMDT_OPM_HDCP_FORCE_ULONG = 0x7fffffff
+};
+
+enum DXGKMDT_OPM_HDCP_FLAG {
+  DXGKMDT_OPM_HDCP_FLAG_NONE = 0x00,
+  DXGKMDT_OPM_HDCP_FLAG_REPEATER = 0x01
+};
+
+enum DXGKMDT_OPM_PROTECTION_TYPE {
+  DXGKMDT_OPM_PROTECTION_TYPE_OTHER = 0x80000000,
+  DXGKMDT_OPM_PROTECTION_TYPE_NONE = 0x00000000,
+  DXGKMDT_OPM_PROTECTION_TYPE_COPP_COMPATIBLE_HDCP = 0x00000001,
+  DXGKMDT_OPM_PROTECTION_TYPE_ACP = 0x00000002,
+  DXGKMDT_OPM_PROTECTION_TYPE_CGMSA = 0x00000004,
+  DXGKMDT_OPM_PROTECTION_TYPE_HDCP = 0x00000008,
+  DXGKMDT_OPM_PROTECTION_TYPE_DPCP = 0x00000010,
+  DXGKMDT_OPM_PROTECTION_TYPE_MASK = 0x8000001F
+};
+
+typedef void* OPM_PROTECTED_OUTPUT_HANDLE;
+
+struct DXGKMDT_OPM_ENCRYPTED_PARAMETERS {
+  BYTE abEncryptedParameters[DXGKMDT_OPM_ENCRYPTED_PARAMETERS_SIZE];
+};
+
+struct DXGKMDT_OPM_OMAC {
+  BYTE abOMAC[DXGKMDT_OPM_OMAC_SIZE];
+};
+
+struct DXGKMDT_OPM_CONFIGURE_PARAMETERS {
+  DXGKMDT_OPM_OMAC omac;
+  GUID guidSetting;
+  ULONG ulSequenceNumber;
+  ULONG cbParametersSize;
+  BYTE abParameters[DXGKMDT_OPM_CONFIGURE_SETTING_DATA_SIZE];
+};
+
+struct DXGKMDT_OPM_RANDOM_NUMBER {
+  BYTE abRandomNumber[DXGKMDT_OPM_128_BIT_RANDOM_NUMBER_SIZE];
+};
+
+struct DXGKMDT_OPM_GET_INFO_PARAMETERS {
+  DXGKMDT_OPM_OMAC omac;
+  DXGKMDT_OPM_RANDOM_NUMBER rnRandomNumber;
+  GUID guidInformation;
+  ULONG ulSequenceNumber;
+  ULONG cbParametersSize;
+  BYTE abParameters[DXGKMDT_OPM_GET_INFORMATION_PARAMETERS_SIZE];
+};
+
+struct DXGKMDT_OPM_REQUESTED_INFORMATION {
+  DXGKMDT_OPM_OMAC omac;
+  ULONG cbRequestedInformationSize;
+  BYTE abRequestedInformation[DXGKMDT_OPM_REQUESTED_INFORMATION_SIZE];
+};
+
+struct DXGKMDT_OPM_SET_PROTECTION_LEVEL_PARAMETERS {
+  ULONG ulProtectionType;
+  ULONG ulProtectionLevel;
+  ULONG Reserved;
+  ULONG Reserved2;
+};
+
+struct DXGKMDT_OPM_STANDARD_INFORMATION {
+  DXGKMDT_OPM_RANDOM_NUMBER rnRandomNumber;
+  ULONG ulStatusFlags;
+  ULONG ulInformation;
+  ULONG ulReserved;
+  ULONG ulReserved2;
+};
+
+typedef NTSTATUS(WINAPI* GetSuggestedOPMProtectedOutputArraySizeFunction)(
+    PUNICODE_STRING device_name,
+    DWORD* suggested_output_array_size);
+
+typedef NTSTATUS(WINAPI* CreateOPMProtectedOutputsFunction)(
+    PUNICODE_STRING device_name,
+    DXGKMDT_OPM_VIDEO_OUTPUT_SEMANTICS vos,
+    DWORD output_array_size,
+    DWORD* num_in_output_array,
+    OPM_PROTECTED_OUTPUT_HANDLE* output_array);
+
+typedef NTSTATUS(WINAPI* GetCertificateFunction)(
+    PUNICODE_STRING device_name,
+    DXGKMDT_CERTIFICATE_TYPE certificate_type,
+    BYTE* certificate,
+    ULONG certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateSizeFunction)(
+    PUNICODE_STRING device_name,
+    DXGKMDT_CERTIFICATE_TYPE certificate_type,
+    ULONG* certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateByHandleFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    DXGKMDT_CERTIFICATE_TYPE certificate_type,
+    BYTE* certificate,
+    ULONG certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateSizeByHandleFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    DXGKMDT_CERTIFICATE_TYPE certificate_type,
+    ULONG* certificate_length);
+
+typedef NTSTATUS(WINAPI* DestroyOPMProtectedOutputFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output);
+
+typedef NTSTATUS(WINAPI* ConfigureOPMProtectedOutputFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    const DXGKMDT_OPM_CONFIGURE_PARAMETERS* parameters,
+    ULONG additional_parameters_size,
+    const BYTE* additional_parameters);
+
+typedef NTSTATUS(WINAPI* GetOPMInformationFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    const DXGKMDT_OPM_GET_INFO_PARAMETERS* parameters,
+    DXGKMDT_OPM_REQUESTED_INFORMATION* requested_information);
+
+typedef NTSTATUS(WINAPI* GetOPMRandomNumberFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    DXGKMDT_OPM_RANDOM_NUMBER* random_number);
+
+typedef NTSTATUS(WINAPI* SetOPMSigningKeyAndSequenceNumbersFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    const DXGKMDT_OPM_ENCRYPTED_PARAMETERS* parameters);
+
 #endif  // SANDBOX_WIN_SRC_NT_INTERNALS_H__
 
diff --git a/sandbox/win/src/sandbox_policy.h b/sandbox/win/src/sandbox_policy.h
index cc39c62..c0916ea 100644
--- a/sandbox/win/src/sandbox_policy.h
+++ b/sandbox/win/src/sandbox_policy.h
@@ -8,8 +8,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <string>
-
 #include "base/strings/string16.h"
 #include "sandbox/win/src/sandbox_types.h"
 #include "sandbox/win/src/security_level.h"
@@ -28,7 +26,6 @@
     SUBSYS_PROCESS,           // Creation of child processes.
     SUBSYS_REGISTRY,          // Creation and opening of registry keys.
     SUBSYS_SYNC,              // Creation of named sync objects.
-    SUBSYS_HANDLES,           // Duplication of handles to other processes.
     SUBSYS_WIN32K_LOCKDOWN    // Win32K Lockdown related policy.
   };
 
@@ -40,25 +37,25 @@
     FILES_ALLOW_QUERY,     // Allows access to query the attributes of a file.
     FILES_ALLOW_DIR_ANY,   // Allows open or create with directory semantics
                            // only.
-    HANDLES_DUP_ANY,       // Allows duplicating handles opened with any
-                           // access permissions.
-    HANDLES_DUP_BROKER,    // Allows duplicating handles to the broker process.
     NAMEDPIPES_ALLOW_ANY,  // Allows creation of a named pipe.
     PROCESS_MIN_EXEC,      // Allows to create a process with minimal rights
                            // over the resulting process and thread handles.
                            // No other parameters besides the command line are
                            // passed to the child process.
-    PROCESS_ALL_EXEC,      // Allows the creation of a process and return fill
+    PROCESS_ALL_EXEC,      // Allows the creation of a process and return full
                            // access on the returned handles.
                            // This flag can be used only when the main token of
                            // the sandboxed application is at least INTERACTIVE.
     EVENTS_ALLOW_ANY,      // Allows the creation of an event with full access.
-    EVENTS_ALLOW_READONLY, // Allows opening an even with synchronize access.
-    REG_ALLOW_READONLY,    // Allows readonly access to a registry key.
-    REG_ALLOW_ANY,         // Allows read and write access to a registry key.
-    FAKE_USER_GDI_INIT     // Fakes user32 and gdi32 initialization. This can
-                           // be used to allow the DLLs to load and initialize
-                           // even if the process cannot access that subsystem.
+    EVENTS_ALLOW_READONLY,  // Allows opening an event with synchronize access.
+    REG_ALLOW_READONLY,     // Allows readonly access to a registry key.
+    REG_ALLOW_ANY,          // Allows read and write access to a registry key.
+    FAKE_USER_GDI_INIT,     // Fakes user32 and gdi32 initialization. This can
+                            // be used to allow the DLLs to load and initialize
+                            // even if the process cannot access that subsystem.
+    IMPLEMENT_OPM_APIS      // Implements FAKE_USER_GDI_INIT and also exposes
+                            // IPC calls to handle Output Protection Manager
+                            // APIs.
   };
 
   // Increments the reference count of this object. The reference count must
@@ -135,6 +132,9 @@
   virtual ResultCode SetJobLevel(JobLevel job_level,
                                  uint32_t ui_exceptions) = 0;
 
+  // Returns the job level.
+  virtual JobLevel GetJobLevel() const = 0;
+
   // Sets a hard limit on the size of the commit set for the sandboxed process.
   // If the limit is reached, the process will be terminated with
   // SBOX_FATAL_MEMORY_EXCEEDED (7012).
@@ -172,17 +172,6 @@
   // than the current level, the sandbox will fail to start.
   virtual ResultCode SetDelayedIntegrityLevel(IntegrityLevel level) = 0;
 
-  // Sets the AppContainer to be used for the sandboxed process. Any capability
-  // to be enabled for the process should be added before this method is invoked
-  // (by calling SetCapability() as many times as needed).
-  // The desired AppContainer must be already installed on the system, otherwise
-  // launching the sandboxed process will fail. See BrokerServices for details
-  // about installing an AppContainer.
-  // Note that currently Windows restricts the use of impersonation within
-  // AppContainers, so this function is incompatible with the use of an initial
-  // token.
-  virtual ResultCode SetAppContainer(const wchar_t* sid) = 0;
-
   // Sets a capability to be enabled for the sandboxed process' AppContainer.
   virtual ResultCode SetCapability(const wchar_t* sid) = 0;
 
@@ -206,6 +195,10 @@
   // Returns the currently set delayed mitigation flags.
   virtual MitigationFlags GetDelayedProcessMitigations() const = 0;
 
+  // Disconnect the target from CSRSS when TargetServices::LowerToken() is
+  // called inside the target.
+  virtual void SetDisconnectCsrss() = 0;
+
   // Sets the interceptions to operate in strict mode. By default, interceptions
   // are performed in "relaxed" mode, where if something inside NTDLL.DLL is
   // already patched we attempt to intercept it anyway. Setting interceptions
@@ -246,11 +239,19 @@
   virtual ResultCode AddKernelObjectToClose(const wchar_t* handle_type,
                                             const wchar_t* handle_name) = 0;
 
-  // Adds a handle that will be shared with the target process.
-  // Returns the handle which was actually shared with the target. This is
-  // achieved by duplicating the handle to ensure that it is inheritable by
-  // the target. The caller should treat this as an opaque value.
-  virtual void* AddHandleToShare(HANDLE handle) = 0;
+  // Adds a handle that will be shared with the target process. Does not take
+  // ownership of the handle.
+  virtual void AddHandleToShare(HANDLE handle) = 0;
+
+  // Locks down the default DACL of the created lockdown and initial tokens
+  // to restrict what other processes are allowed to access a process' kernel
+  // resources.
+  virtual void SetLockdownDefaultDacl() = 0;
+
+  // Enable OPM API redirection when in Win32k lockdown.
+  virtual void SetEnableOPMRedirection() = 0;
+  // Returns true if OPM API redirection is enabled when in Win32k lockdown.
+  virtual bool GetEnableOPMRedirection() = 0;
 };
 
 }  // namespace sandbox
diff --git a/sandbox/win/src/sandbox_types.h b/sandbox/win/src/sandbox_types.h
index b749b9c..919086a 100644
--- a/sandbox/win/src/sandbox_types.h
+++ b/sandbox/win/src/sandbox_types.h
@@ -5,10 +5,16 @@
 #ifndef SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
 #define SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
 
+#include "base/process/launch.h"
+
 namespace sandbox {
 
 // Operation result codes returned by the sandbox API.
-enum ResultCode {
+//
+// Note: These codes are listed in a histogram and any new codes should be added
+// at the end.
+//
+enum ResultCode : int {
   SBOX_ALL_OK = 0,
   // Error is originating on the win32 layer. Call GetlastError() for more
   // information.
@@ -47,6 +53,56 @@
   SBOX_ERROR_PROC_THREAD_ATTRIBUTES = 17,
   // Error in creating process.
   SBOX_ERROR_CREATE_PROCESS = 18,
+  // Failure calling delegate PreSpawnTarget.
+  SBOX_ERROR_DELEGATE_PRE_SPAWN = 19,
+  // Could not assign process to job object.
+  SBOX_ERROR_ASSIGN_PROCESS_TO_JOB_OBJECT = 20,
+  // Could not set the thread token.
+  SBOX_ERROR_SET_THREAD_TOKEN = 21,
+  // Could not get thread context of new process.
+  SBOX_ERROR_GET_THREAD_CONTEXT = 22,
+  // Could not duplicate target info of new process.
+  SBOX_ERROR_DUPLICATE_TARGET_INFO = 23,
+  // Could not set low box token.
+  SBOX_ERROR_SET_LOW_BOX_TOKEN = 24,
+  // Could not create file mapping for IPC dispatcher.
+  SBOX_ERROR_CREATE_FILE_MAPPING = 25,
+  // Could not duplicate shared section into target process for IPC dispatcher.
+  SBOX_ERROR_DUPLICATE_SHARED_SECTION = 26,
+  // Could not map view of shared memory in broker.
+  SBOX_ERROR_MAP_VIEW_OF_SHARED_SECTION = 27,
+  // Could not apply ASLR mitigations to target process.
+  SBOX_ERROR_APPLY_ASLR_MITIGATIONS = 28,
+  // Could not setup one of the required interception services.
+  SBOX_ERROR_SETUP_BASIC_INTERCEPTIONS = 29,
+  // Could not setup basic interceptions.
+  SBOX_ERROR_SETUP_INTERCEPTION_SERVICE = 30,
+  // Could not initialize interceptions. This usually means 3rd party software
+  // is stomping on our hooks, or can sometimes mean the syscall format has
+  // changed.
+  SBOX_ERROR_INITIALIZE_INTERCEPTIONS = 31,
+  // Could not setup the imports for ntdll in target process.
+  SBOX_ERROR_SETUP_NTDLL_IMPORTS = 32,
+  // Could not setup the handle closer in target process.
+  SBOX_ERROR_SETUP_HANDLE_CLOSER = 33,
+  // Cannot get the current Window Station.
+  SBOX_ERROR_CANNOT_GET_WINSTATION = 34,
+  // Cannot query the security attributes of the current Window Station.
+  SBOX_ERROR_CANNOT_QUERY_WINSTATION_SECURITY = 35,
+  // Cannot get the current Desktop.
+  SBOX_ERROR_CANNOT_GET_DESKTOP = 36,
+  // Cannot query the security attributes of the current Desktop.
+  SBOX_ERROR_CANNOT_QUERY_DESKTOP_SECURITY = 37,
+  // Cannot setup the interception manager config buffer.
+  SBOX_ERROR_CANNOT_SETUP_INTERCEPTION_CONFIG_BUFFER = 38,
+  // Cannot copy data to the child process.
+  SBOX_ERROR_CANNOT_COPY_DATA_TO_CHILD = 39,
+  // Cannot setup the interception thunk.
+  SBOX_ERROR_CANNOT_SETUP_INTERCEPTION_THUNK = 40,
+  // Cannot resolve the interception thunk.
+  SBOX_ERROR_CANNOT_RESOLVE_INTERCEPTION_THUNK = 41,
+  // Cannot write interception thunk to child process.
+  SBOX_ERROR_CANNOT_WRITE_INTERCEPTION_THUNK = 42,
   // Placeholder for last item of the enum.
   SBOX_ERROR_LAST
 };
diff --git a/sandbox/win/src/security_level.h b/sandbox/win/src/security_level.h
index 26ec306..d8524c1 100644
--- a/sandbox/win/src/security_level.h
+++ b/sandbox/win/src/security_level.h
@@ -183,15 +183,23 @@
 // PROCESS_CREATION_MITIGATION_POLICY_STRICT_HANDLE_CHECKS_ALWAYS_ON.
 const MitigationFlags MITIGATION_STRICT_HANDLE_CHECKS             = 0x00000100;
 
-// Prevents the process from making Win32k calls. Must be enabled after
-// startup. Corresponds to
+// Prevents the process from making Win32k calls. Corresponds to
 // PROCESS_CREATION_MITIGATION_POLICY_WIN32K_SYSTEM_CALL_DISABLE_ALWAYS_ON.
 const MitigationFlags MITIGATION_WIN32K_DISABLE                   = 0x00000200;
 
-// Disables common DLL injection methods (e.g. window hooks and
-// App_InitDLLs). Corresponds to
+// Prevents certain built-in third party extension points from being used.
+// - App_Init DLLs
+// - Winsock Layered Service Providers (LSPs)
+// - Global Windows Hooks (NOT thread-targeted hooks)
+// - Legacy Input Method Editors (IMEs).
+// I.e.: Disable legacy hooking mechanisms.  Corresponds to
 // PROCESS_CREATION_MITIGATION_POLICY_EXTENSION_POINT_DISABLE_ALWAYS_ON.
-const MitigationFlags MITIGATION_EXTENSION_DLL_DISABLE            = 0x00000400;
+const MitigationFlags MITIGATION_EXTENSION_POINT_DISABLE = 0x00000400;
+
+// Prevents the process from loading non-system fonts into GDI.
+// Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_FONT_DISABLE_ALWAYS_ON
+const MitigationFlags MITIGATION_NONSYSTEM_FONT_DISABLE = 0x00000800;
 
 // Sets the DLL search order to LOAD_LIBRARY_SEARCH_DEFAULT_DIRS. Additional
 // directories can be added via the Windows AddDllDirectory() function.
@@ -204,6 +212,14 @@
 // opening the process token for impersonate/duplicate/assignment.
 const MitigationFlags MITIGATION_HARDEN_TOKEN_IL_POLICY  = 0x00000001ULL << 33;
 
+// Blocks mapping of images from remote devices. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_REMOTE_ALWAYS_ON.
+const MitigationFlags MITIGATION_IMAGE_LOAD_NO_REMOTE = 0x00000001ULL << 52;
+
+// Blocks mapping of images that have the low mandatory label. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_LOW_LABEL_ALWAYS_ON.
+const MitigationFlags MITIGATION_IMAGE_LOAD_NO_LOW_LABEL = 0x00000001ULL << 56;
+
 }  // namespace sandbox
 
 #endif  // SANDBOX_SRC_SECURITY_LEVEL_H_
diff --git a/sandbox/win/wow_helper/wow_helper.vcproj b/sandbox/win/wow_helper/wow_helper.vcproj
index 5482fbd..c8e7c9e 100644
--- a/sandbox/win/wow_helper/wow_helper.vcproj
+++ b/sandbox/win/wow_helper/wow_helper.vcproj
@@ -178,14 +178,6 @@
 	</References>
 	<Files>
 		<Filter
-			Name="base"
-			>
-			<File
-				RelativePath="..\..\base\scoped_ptr.h"
-				>
-			</File>
-		</Filter>
-		<Filter
 			Name="sandbox"
 			>
 			<File
diff --git a/testing/multiprocess_func_list.cc b/testing/multiprocess_func_list.cc
index 49ae07d..f96c2b5 100644
--- a/testing/multiprocess_func_list.cc
+++ b/testing/multiprocess_func_list.cc
@@ -40,7 +40,7 @@
       ProcessFunctions(main_func_ptr, setup_func_ptr);
 }
 
-int InvokeChildProcessTest(std::string test_name) {
+int InvokeChildProcessTest(const std::string& test_name) {
   MultiProcessTestMap& func_lookup_table = GetMultiprocessFuncMap();
   MultiProcessTestMap::iterator it = func_lookup_table.find(test_name);
   if (it != func_lookup_table.end()) {
diff --git a/testing/multiprocess_func_list.h b/testing/multiprocess_func_list.h
index f806d53..c3d2f1f 100644
--- a/testing/multiprocess_func_list.h
+++ b/testing/multiprocess_func_list.h
@@ -47,7 +47,7 @@
 
 // Invoke the main function of a test previously registered with
 // MULTIPROCESS_TEST_MAIN()
-int InvokeChildProcessTest(std::string test_name);
+int InvokeChildProcessTest(const std::string& test_name);
 
 // This macro creates a global MultiProcessTest::AppendMultiProcessTest object
 // whose constructor does the work of adding the global mapping.
diff --git a/third_party/libevent/event.h b/third_party/libevent/event.h
deleted file mode 100644
index 4a91e4b..0000000
--- a/third_party/libevent/event.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// The Chromium build contains its own checkout of libevent. This stub is used
-// when building the Chrome OS libchrome package to instead use the system
-// headers.
-#include <event.h>
diff --git a/third_party/valgrind/memcheck.h b/third_party/valgrind/memcheck.h
deleted file mode 100644
index 3cd08a9..0000000
--- a/third_party/valgrind/memcheck.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifdef ANDROID
-  #include "memcheck/memcheck.h"
-#else
-  // On Chrome OS, these files will be added in a patch applied in the ebuild.
-  #include <base/third_party/valgrind/memcheck.h>
-#endif
diff --git a/third_party/valgrind/valgrind.h b/third_party/valgrind/valgrind.h
deleted file mode 100644
index 779ef98..0000000
--- a/third_party/valgrind/valgrind.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifdef ANDROID
-  #include "include/valgrind.h"
-#else
-  // These files will be added in a patch applied in the ebuild.
-  #include <base/third_party/valgrind/valgrind.h>
-#endif