libchrome: Uprev the library to r395517 from Chromium

Pulled the latest version of libchrome from upstream Chromium.

The merge was done against r395517, which corresponds to git commit
ebdcb576bb346af95b8ad219f6250daf63122f98 from May 23, 2016.

Notable changes (a short migration sketch for callers follows the list):
- scoped_ptr was removed in favor of std::unique_ptr.
- base/thread_task_runner_handle.h was moved to base/threading/.
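
As a rough illustration of the two API changes above, a downstream caller
migrates roughly as follows (sketch only; Foo and MigrationExample are
made-up names, not code touched by this change):

```cpp
#include <memory>

#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"  // was base/thread_task_runner_handle.h

struct Foo {};

void MigrationExample() {
  // Old: scoped_ptr<Foo> foo(new Foo());  (base/memory/scoped_ptr.h is gone)
  std::unique_ptr<Foo> foo(new Foo());

  // Only the header path changed; the class and its behavior are the same.
  scoped_refptr<base::SingleThreadTaskRunner> runner =
      base::ThreadTaskRunnerHandle::Get();
}
```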

BUG: 28985443
TEST: All tests in libchrome_test pass on a dragonboard-eng build

Change-Id: Ic9f9ed1cafe754c96cd2f007984514e091aaba39
diff --git a/Android.mk b/Android.mk
index 3af3573..3ddac77 100644
--- a/Android.mk
+++ b/Android.mk
@@ -35,6 +35,7 @@
 libchromeExportedCIncludes := $(LOCAL_PATH)
 
 libchromeCommonSrc := \
+	base/allocator/allocator_shim.cc \
 	base/at_exit.cc \
 	base/base64.cc \
 	base/base64url.cc \
@@ -68,7 +69,6 @@
 	base/files/scoped_file.cc \
 	base/files/scoped_temp_dir.cc \
 	base/guid.cc \
-	base/guid_posix.cc \
 	base/hash.cc \
 	base/json/json_file_value_serializer.cc \
 	base/json/json_parser.cc \
@@ -160,6 +160,7 @@
 	base/threading/non_thread_safe_impl.cc \
 	base/threading/platform_thread_posix.cc \
 	base/threading/post_task_and_reply_impl.cc \
+	base/threading/sequenced_task_runner_handle.cc \
 	base/threading/sequenced_worker_pool.cc \
 	base/threading/simple_thread.cc \
 	base/threading/thread.cc \
@@ -170,9 +171,9 @@
 	base/threading/thread_local_storage.cc \
 	base/threading/thread_local_storage_posix.cc \
 	base/threading/thread_restrictions.cc \
+	base/threading/thread_task_runner_handle.cc \
 	base/threading/worker_pool.cc \
 	base/threading/worker_pool_posix.cc \
-	base/thread_task_runner_handle.cc \
 	base/time/clock.cc \
 	base/time/default_clock.cc \
 	base/time/default_tick_clock.cc \
@@ -183,6 +184,9 @@
 	base/timer/timer.cc \
 	base/trace_event/heap_profiler_allocation_context.cc \
 	base/trace_event/heap_profiler_allocation_context_tracker.cc \
+	base/trace_event/heap_profiler_allocation_register.cc \
+	base/trace_event/heap_profiler_allocation_register_posix.cc \
+	base/trace_event/heap_profiler_heap_dump_writer.cc \
 	base/trace_event/heap_profiler_stack_frame_deduplicator.cc \
 	base/trace_event/heap_profiler_type_name_deduplicator.cc \
 	base/trace_event/memory_allocator_dump.cc \
@@ -395,7 +399,7 @@
 	crypto/secure_hash_unittest.cc \
 	crypto/sha2_unittest.cc \
 
-libchromeHostCFlags := -D__ANDROID_HOST__
+libchromeHostCFlags := -D__ANDROID_HOST__ -DDONT_EMBED_BUILD_METADATA
 
 ifeq ($(HOST_OS),linux)
 libchromeHostSrc := $(libchromeLinuxSrc)
@@ -419,6 +423,7 @@
 LOCAL_SRC_FILES := \
 	$(libchromeCommonSrc) \
 	$(libchromeLinuxSrc) \
+	base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc \
 	base/memory/shared_memory_android.cc \
 	base/sys_info_chromeos.cc \
 
@@ -426,6 +431,8 @@
 LOCAL_CFLAGS := $(libchromeCommonCFlags)
 LOCAL_CLANG := $(libchromeUseClang)
 LOCAL_C_INCLUDES := $(libchromeCommonCIncludes)
+LOCAL_LDFLAGS := -Wl,-wrap,calloc -Wl,-wrap,free -Wl,-wrap,malloc \
+	-Wl,-wrap,memalign -Wl,-wrap,realloc
 LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbase
 LOCAL_EXPORT_STATIC_LIBRARY_HEADERS := libgtest_prod
 LOCAL_SHARED_LIBRARIES :=  libbase libevent liblog libcutils
@@ -446,7 +453,11 @@
 LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbase
 LOCAL_SHARED_LIBRARIES := libbase libevent-host
 LOCAL_STATIC_LIBRARIES := libmodpb64-host libgtest_prod
-LOCAL_SRC_FILES := $(libchromeCommonSrc) $(libchromeHostSrc)
+LOCAL_SRC_FILES := \
+	$(libchromeCommonSrc) \
+	$(libchromeHostSrc) \
+	base/allocator/allocator_shim_default_dispatch_to_glibc.cc \
+
 LOCAL_LDFLAGS := $(libchromeHostLdFlags)
 include $(BUILD_HOST_SHARED_LIBRARY)
 
@@ -495,7 +506,7 @@
 LOCAL_SRC_FILES := \
 	crypto/openssl_util.cc \
 	crypto/random.cc \
-	crypto/secure_hash_openssl.cc \
+	crypto/secure_hash.cc \
 	crypto/secure_util.cc \
 	crypto/sha2.cc \
 
diff --git a/SConstruct b/SConstruct
index 964682e..170d4b1 100644
--- a/SConstruct
+++ b/SConstruct
@@ -27,6 +27,8 @@
     'name' : 'core',
     'sources' : """
                 allocator/allocator_extension.cc
+                allocator/allocator_shim.cc
+                allocator/allocator_shim_default_dispatch_to_glibc.cc
                 at_exit.cc
                 base64.cc
                 base64url.cc
@@ -62,7 +64,6 @@
                 files/scoped_file.cc
                 files/scoped_temp_dir.cc
                 guid.cc
-                guid_posix.cc
                 hash.cc
                 json/json_file_value_serializer.cc
                 json/json_parser.cc
@@ -167,6 +168,7 @@
                 threading/platform_thread_linux.cc
                 threading/platform_thread_posix.cc
                 threading/post_task_and_reply_impl.cc
+                threading/sequenced_task_runner_handle.cc
                 threading/sequenced_worker_pool.cc
                 threading/simple_thread.cc
                 threading/thread.cc
@@ -177,9 +179,9 @@
                 threading/thread_local_storage.cc
                 threading/thread_local_storage_posix.cc
                 threading/thread_restrictions.cc
+                threading/thread_task_runner_handle.cc
                 threading/worker_pool.cc
                 threading/worker_pool_posix.cc
-                thread_task_runner_handle.cc
                 timer/elapsed_timer.cc
                 timer/timer.cc
                 time/clock.cc
@@ -191,6 +193,9 @@
                 trace_event/malloc_dump_provider.cc
                 trace_event/heap_profiler_allocation_context.cc
                 trace_event/heap_profiler_allocation_context_tracker.cc
+                trace_event/heap_profiler_allocation_register.cc
+                trace_event/heap_profiler_allocation_register_posix.cc
+                trace_event/heap_profiler_heap_dump_writer.cc
                 trace_event/heap_profiler_stack_frame_deduplicator.cc
                 trace_event/heap_profiler_type_name_deduplicator.cc
                 trace_event/memory_allocator_dump.cc
@@ -272,7 +277,7 @@
                 rsa_private_key.cc
                 rsa_private_key_nss.cc
                 scoped_test_nss_db.cc
-                secure_hash_default.cc
+                secure_hash.cc
                 secure_util.cc
                 sha2.cc
                 signature_creator_nss.cc
diff --git a/base/BUILD.gn b/base/BUILD.gn
index 1cbc063..5712663 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -19,6 +19,7 @@
 
 import("//build/buildflag_header.gni")
 import("//build/config/allocator.gni")
+import("//build/config/chromecast_build.gni")
 import("//build/config/compiler/compiler.gni")
 import("//build/config/nacl/config.gni")
 import("//build/config/sysroot.gni")
@@ -28,7 +29,8 @@
 
 declare_args() {
   # Override this value to give a specific build date.
-  # See //base/build_time.cc for more details.
+  # See //base/build_time.cc and //build/write_build_date_header.py for more
+  # details and the expected format.
   override_build_date = "N/A"
 }
 
@@ -64,15 +66,6 @@
       "/DELAYLOAD:setupapi.dll",
     ]
   }
-
-  copy("copy_dbghelp.dll") {
-    sources = [
-      "../build/win/dbghelp_xp/dbghelp.dll",
-    ]
-    outputs = [
-      "$root_out_dir/{{source_file_part}}",
-    ]
-  }
 }
 
 if (is_nacl_nonsfi) {
@@ -155,6 +148,8 @@
     "android/base_jni_registrar.h",
     "android/build_info.cc",
     "android/build_info.h",
+    "android/callback_android.cc",
+    "android/callback_android.h",
     "android/command_line_android.cc",
     "android/command_line_android.h",
     "android/content_uri_utils.cc",
@@ -352,8 +347,6 @@
     "gtest_prod_util.h",
     "guid.cc",
     "guid.h",
-    "guid_posix.cc",
-    "guid_win.cc",
     "hash.cc",
     "hash.h",
     "id_map.h",
@@ -457,6 +450,7 @@
     "memory/discardable_memory_allocator.h",
     "memory/discardable_shared_memory.cc",
     "memory/discardable_shared_memory.h",
+    "memory/free_deleter.h",
     "memory/linked_ptr.h",
     "memory/manual_constructor.h",
     "memory/memory_pressure_listener.cc",
@@ -477,7 +471,6 @@
     "memory/ref_counted_memory.cc",
     "memory/ref_counted_memory.h",
     "memory/scoped_policy.h",
-    "memory/scoped_ptr.h",
     "memory/scoped_vector.h",
     "memory/shared_memory.h",
     "memory/shared_memory_android.cc",
@@ -565,6 +558,7 @@
     "numerics/safe_math_impl.h",
     "observer_list.h",
     "observer_list_threadsafe.h",
+    "optional.h",
     "os_compat_android.cc",
     "os_compat_android.h",
     "os_compat_nacl.cc",
@@ -754,7 +748,7 @@
     #"sys_info_freebsd.cc",  # Unused in Chromium build.
     "sys_info_ios.mm",
     "sys_info_linux.cc",
-    "sys_info_mac.cc",
+    "sys_info_mac.mm",
 
     #"sys_info_openbsd.cc",  # Unused in Chromium build.
     "sys_info_posix.cc",
@@ -766,15 +760,34 @@
     "task_runner.cc",
     "task_runner.h",
     "task_runner_util.h",
+    "task_scheduler/delayed_task_manager.cc",
+    "task_scheduler/delayed_task_manager.h",
+    "task_scheduler/priority_queue.cc",
+    "task_scheduler/priority_queue.h",
     "task_scheduler/scheduler_lock.h",
     "task_scheduler/scheduler_lock_impl.cc",
     "task_scheduler/scheduler_lock_impl.h",
+    "task_scheduler/scheduler_service_thread.cc",
+    "task_scheduler/scheduler_service_thread.h",
+    "task_scheduler/scheduler_thread_pool.h",
+    "task_scheduler/scheduler_thread_pool_impl.cc",
+    "task_scheduler/scheduler_thread_pool_impl.h",
+    "task_scheduler/scheduler_worker_thread.cc",
+    "task_scheduler/scheduler_worker_thread.h",
+    "task_scheduler/scheduler_worker_thread_stack.cc",
+    "task_scheduler/scheduler_worker_thread_stack.h",
     "task_scheduler/sequence.cc",
     "task_scheduler/sequence.h",
     "task_scheduler/sequence_sort_key.cc",
     "task_scheduler/sequence_sort_key.h",
     "task_scheduler/task.cc",
     "task_scheduler/task.h",
+    "task_scheduler/task_scheduler.cc",
+    "task_scheduler/task_scheduler.h",
+    "task_scheduler/task_scheduler_impl.cc",
+    "task_scheduler/task_scheduler_impl.h",
+    "task_scheduler/task_tracker.cc",
+    "task_scheduler/task_tracker.h",
     "task_scheduler/task_traits.cc",
     "task_scheduler/task_traits.h",
     "template_util.h",
@@ -786,8 +799,6 @@
     "third_party/nspr/prtime.cc",
     "third_party/nspr/prtime.h",
     "third_party/superfasthash/superfasthash.c",
-    "thread_task_runner_handle.cc",
-    "thread_task_runner_handle.h",
     "threading/non_thread_safe.h",
     "threading/non_thread_safe_impl.cc",
     "threading/non_thread_safe_impl.h",
@@ -826,6 +837,8 @@
     "threading/thread_local_win.cc",
     "threading/thread_restrictions.cc",
     "threading/thread_restrictions.h",
+    "threading/thread_task_runner_handle.cc",
+    "threading/thread_task_runner_handle.h",
     "threading/watchdog.cc",
     "threading/watchdog.h",
     "threading/worker_pool.cc",
@@ -855,7 +868,10 @@
     "timer/mock_timer.h",
     "timer/timer.cc",
     "timer/timer.h",
+    "trace_event/blame_context.cc",
+    "trace_event/blame_context.h",
     "trace_event/common/trace_event_common.h",
+    "trace_event/heap_profiler.h",
     "trace_event/heap_profiler_allocation_context.cc",
     "trace_event/heap_profiler_allocation_context.h",
     "trace_event/heap_profiler_allocation_context_tracker.cc",
@@ -972,6 +988,8 @@
     "win/shortcut.h",
     "win/startup_information.cc",
     "win/startup_information.h",
+    "win/wait_chain.cc",
+    "win/wait_chain.h",
     "win/win_util.cc",
     "win/win_util.h",
     "win/windows_version.cc",
@@ -1017,7 +1035,7 @@
 
   # Allow more direct string conversions on platforms with native utf8
   # strings
-  if (is_mac || is_ios || is_chromeos) {
+  if (is_mac || is_ios || is_chromeos || is_chromecast) {
     defines += [ "SYSTEM_NATIVE_UTF8" ]
   }
 
@@ -1159,13 +1177,7 @@
       "sha1_win.cc",
     ]
 
-    # Required for base/stack_trace_win.cc to symbolize correctly.
-    data += [ "$root_build_dir/dbghelp.dll" ]
-
     deps += [ "//base/trace_event/etw_manifest:chrome_events_win" ]
-    if (current_toolchain == default_toolchain) {
-      deps += [ ":copy_dbghelp.dll" ]
-    }
 
     if (is_component_build) {
       # Copy the VS runtime DLLs into the isolate so that they don't have to be
@@ -1235,7 +1247,7 @@
         ]
       }
       if (is_asan) {
-        data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/3.9.0/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
+        data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
       }
     }
 
@@ -1247,6 +1259,8 @@
       "netapi32.lib",
       "powrprof.lib",
       "setupapi.lib",
+      "userenv.lib",
+      "winmm.lib",
     ]
     all_dependent_configs = [ ":base_win_linker_flags" ]
   } else if (!is_nacl || is_nacl_nonsfi) {
@@ -1260,7 +1274,14 @@
       "trace_event/malloc_dump_provider.cc",
       "trace_event/malloc_dump_provider.h",
     ]
-    libs = [ "bsm" ]
+    libs = [
+      "ApplicationServices.framework",
+      "AppKit.framework",
+      "bsm",
+      "CoreFoundation.framework",
+      "IOKit.framework",
+      "Security.framework",
+    ]
   }
 
   # Mac or iOS.
@@ -1271,11 +1292,6 @@
       "strings/sys_string_conversions_posix.cc",
       "threading/platform_thread_internal_posix.cc",
     ]
-
-    if (is_asan) {
-      # TODO(GYP) hook up asan on Mac. GYP has this extra dylib:
-      #data += [ "$root_out_dir/libclang_rt.asan_osx_dynamic.dylib" ]
-    }
   } else {
     # Non-Mac/ios.
     sources -= [
@@ -1511,65 +1527,68 @@
 
   # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
   configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
+
+  if (is_mac) {
+    libs = [ "CoreFoundation.framework" ]
+  }
 }
 
-if (is_ios || is_android || is_win || (is_linux && !is_chromeos)) {
-  # TODO(GYP): Figure out which of these work and are needed on other platforms.
-  test("base_perftests") {
-    sources = [
-      "message_loop/message_pump_perftest.cc",
+test("base_perftests") {
+  sources = [
+    "message_loop/message_pump_perftest.cc",
 
-      # "test/run_all_unittests.cc",
-      "threading/thread_perftest.cc",
+    # "test/run_all_unittests.cc",
+    "threading/thread_perftest.cc",
+  ]
+  deps = [
+    ":base",
+    "//base/test:test_support",
+    "//base/test:test_support_perf",
+    "//testing/gtest",
+    "//testing/perf",
+  ]
+
+  if (is_android) {
+    deps += [ "//testing/android/native_test:native_test_native_code" ]
+  }
+}
+
+test("base_i18n_perftests") {
+  sources = [
+    "i18n/streaming_utf8_validator_perftest.cc",
+  ]
+  deps = [
+    ":base",
+    ":i18n",
+    "//base/test:test_support",
+    "//base/test:test_support_perf",
+    "//testing/gtest",
+  ]
+}
+
+if (!is_ios) {
+  executable("build_utf8_validator_tables") {
+    sources = [
+      "i18n/build_utf8_validator_tables.cc",
     ]
     deps = [
       ":base",
-      "//base/test:test_support",
-      "//base/test:test_support_perf",
-      "//testing/gtest",
-      "//testing/perf",
+      "//build/config/sanitizers:deps",
+      "//build/win:default_exe_manifest",
+      "//third_party/icu:icuuc",
     ]
-
-    if (is_android) {
-      deps += [ "//testing/android/native_test:native_test_native_code" ]
-    }
   }
 
-  test("base_i18n_perftests") {
+  executable("check_example") {
     sources = [
-      "i18n/streaming_utf8_validator_perftest.cc",
+      "check_example.cc",
     ]
     deps = [
       ":base",
-      ":i18n",
-      "//base/test:test_support",
-      "//base/test:test_support_perf",
-      "//testing/gtest",
+      "//build/config/sanitizers:deps",
+      "//build/win:default_exe_manifest",
     ]
   }
-
-  if (!is_ios) {
-    executable("build_utf8_validator_tables") {
-      sources = [
-        "i18n/build_utf8_validator_tables.cc",
-      ]
-      deps = [
-        ":base",
-        "//build/config/sanitizers:deps",
-        "//third_party/icu:icuuc",
-      ]
-    }
-
-    executable("check_example") {
-      sources = [
-        "check_example.cc",
-      ]
-      deps = [
-        ":base",
-        "//build/config/sanitizers:deps",
-      ]
-    }
-  }
 }
 
 source_set("message_loop_tests") {
@@ -1629,8 +1648,29 @@
 }
 
 bundle_data("base_unittests_bundle_data") {
+  testonly = true
   sources = [
-    "test/data",
+    "test/data/file_util/binary_file.bin",
+    "test/data/file_util/binary_file_diff.bin",
+    "test/data/file_util/binary_file_same.bin",
+    "test/data/file_util/blank_line.txt",
+    "test/data/file_util/blank_line_crlf.txt",
+    "test/data/file_util/crlf.txt",
+    "test/data/file_util/different.txt",
+    "test/data/file_util/different_first.txt",
+    "test/data/file_util/different_last.txt",
+    "test/data/file_util/empty1.txt",
+    "test/data/file_util/empty2.txt",
+    "test/data/file_util/first1.txt",
+    "test/data/file_util/first2.txt",
+    "test/data/file_util/original.txt",
+    "test/data/file_util/same.txt",
+    "test/data/file_util/same_length.txt",
+    "test/data/file_util/shortened.txt",
+    "test/data/json/bom_feff.json",
+    "test/data/serializer_nested_test.json",
+    "test/data/serializer_test.json",
+    "test/data/serializer_test_nowhitespace.json",
   ]
   outputs = [
     "{{bundle_resources_dir}}/" +
@@ -1657,6 +1697,7 @@
     "base64url_unittest.cc",
     "big_endian_unittest.cc",
     "bind_unittest.cc",
+    "bit_cast_unittest.cc",
     "bits_unittest.cc",
     "build_time_unittest.cc",
     "callback_helpers_unittest.cc",
@@ -1680,6 +1721,7 @@
     "debug/task_annotator_unittest.cc",
     "deferred_sequenced_task_runner_unittest.cc",
     "environment_unittest.cc",
+    "feature_list_unittest.cc",
     "file_version_info_unittest.cc",
     "files/dir_reader_posix_unittest.cc",
     "files/file_locking_unittest.cc",
@@ -1733,7 +1775,9 @@
     "memory/aligned_memory_unittest.cc",
     "memory/discardable_shared_memory_unittest.cc",
     "memory/linked_ptr_unittest.cc",
+    "memory/memory_pressure_listener_unittest.cc",
     "memory/memory_pressure_monitor_chromeos_unittest.cc",
+    "memory/memory_pressure_monitor_mac_unittest.cc",
     "memory/memory_pressure_monitor_win_unittest.cc",
     "memory/ptr_util_unittest.cc",
     "memory/ref_counted_memory_unittest.cc",
@@ -1766,6 +1810,7 @@
     "native_library_unittest.cc",
     "numerics/safe_numerics_unittest.cc",
     "observer_list_unittest.cc",
+    "optional_unittest.cc",
     "os_compat_android_unittest.cc",
     "path_service_unittest.cc",
     "pickle_unittest.cc",
@@ -1815,9 +1860,20 @@
     "system_monitor/system_monitor_unittest.cc",
     "task/cancelable_task_tracker_unittest.cc",
     "task_runner_util_unittest.cc",
+    "task_scheduler/delayed_task_manager_unittest.cc",
+    "task_scheduler/priority_queue_unittest.cc",
     "task_scheduler/scheduler_lock_unittest.cc",
+    "task_scheduler/scheduler_service_thread_unittest.cc",
+    "task_scheduler/scheduler_thread_pool_impl_unittest.cc",
+    "task_scheduler/scheduler_worker_thread_stack_unittest.cc",
+    "task_scheduler/scheduler_worker_thread_unittest.cc",
     "task_scheduler/sequence_sort_key_unittest.cc",
     "task_scheduler/sequence_unittest.cc",
+    "task_scheduler/task_scheduler_impl_unittest.cc",
+    "task_scheduler/task_tracker_unittest.cc",
+    "task_scheduler/test_task_factory.cc",
+    "task_scheduler/test_task_factory.h",
+    "task_scheduler/test_utils.h",
     "template_util_unittest.cc",
     "test/histogram_tester_unittest.cc",
     "test/icu_test_util.cc",
@@ -1847,6 +1903,7 @@
     "timer/mock_timer_unittest.cc",
     "timer/timer_unittest.cc",
     "tools_sanity_unittest.cc",
+    "trace_event/blame_context_unittest.cc",
     "trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
     "trace_event/heap_profiler_allocation_register_unittest.cc",
     "trace_event/heap_profiler_heap_dump_writer_unittest.cc",
@@ -1886,6 +1943,7 @@
     "win/scoped_variant_unittest.cc",
     "win/shortcut_unittest.cc",
     "win/startup_information_unittest.cc",
+    "win/wait_chain_unittest.cc",
     "win/win_util_unittest.cc",
     "win/windows_version_unittest.cc",
     "win/wrapped_window_proc_unittest.cc",
@@ -1916,7 +1974,7 @@
 
   # Allow more direct string conversions on platforms with native utf8
   # strings
-  if (is_mac || is_ios || is_chromeos) {
+  if (is_mac || is_ios || is_chromeos || is_chromecast) {
     defines = [ "SYSTEM_NATIVE_UTF8" ]
   }
 
@@ -1926,10 +1984,6 @@
       ":base_java_unittest_support",
       "//base/android/jni_generator:jni_generator_tests",
     ]
-
-    # TODO(brettw) I think this should not be here, we should not be using
-    # isolate files.
-    isolate_file = "base_unittests.isolate"
   }
 
   if (is_ios) {
@@ -1959,6 +2013,13 @@
     # TODO(GYP): dep on copy_test_data_ios action.
   }
 
+  if (is_mac) {
+    libs = [
+      "CoreFoundation.framework",
+      "Foundation.framework",
+    ]
+  }
+
   if (is_linux) {
     sources -= [ "file_version_info_unittest.cc" ]
 
@@ -2017,7 +2078,8 @@
     if (is_win) {
       data += [ "$root_out_dir/base_unittests.exe.pdb" ]
     } else if (is_mac) {
-      data += [ "$root_out_dir/base_unittests.dSYM/" ]
+      # TODO(crbug.com/330301): make this conditional on mac_strip_release.
+      # data += [ "$root_out_dir/base_unittests.dSYM/" ]
     }
   }
 }
@@ -2072,6 +2134,7 @@
       "android/java/src/org/chromium/base/ApkAssets.java",
       "android/java/src/org/chromium/base/ApplicationStatus.java",
       "android/java/src/org/chromium/base/BuildInfo.java",
+      "android/java/src/org/chromium/base/Callback.java",
       "android/java/src/org/chromium/base/CommandLine.java",
       "android/java/src/org/chromium/base/ContentUriUtils.java",
       "android/java/src/org/chromium/base/ContextUtils.java",
@@ -2121,7 +2184,68 @@
       "//third_party/jsr-305:jsr_305_javalib",
     ]
 
-    DEPRECATED_java_in_dir = "android/java/src"
+    java_files = [
+      "android/java/src/org/chromium/base/ActivityState.java",
+      "android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java",
+      "android/java/src/org/chromium/base/ApiCompatibilityUtils.java",
+      "android/java/src/org/chromium/base/ApkAssets.java",
+      "android/java/src/org/chromium/base/ApplicationStatus.java",
+      "android/java/src/org/chromium/base/BaseChromiumApplication.java",
+      "android/java/src/org/chromium/base/BaseSwitches.java",
+      "android/java/src/org/chromium/base/BuildInfo.java",
+      "android/java/src/org/chromium/base/Callback.java",
+      "android/java/src/org/chromium/base/CollectionUtil.java",
+      "android/java/src/org/chromium/base/CommandLine.java",
+      "android/java/src/org/chromium/base/CommandLineInitUtil.java",
+      "android/java/src/org/chromium/base/ContentUriUtils.java",
+      "android/java/src/org/chromium/base/ContextUtils.java",
+      "android/java/src/org/chromium/base/CpuFeatures.java",
+      "android/java/src/org/chromium/base/EventLog.java",
+      "android/java/src/org/chromium/base/FieldTrialList.java",
+      "android/java/src/org/chromium/base/FileUtils.java",
+      "android/java/src/org/chromium/base/ImportantFileWriterAndroid.java",
+      "android/java/src/org/chromium/base/JNIUtils.java",
+      "android/java/src/org/chromium/base/JavaHandlerThread.java",
+      "android/java/src/org/chromium/base/LocaleUtils.java",
+      "android/java/src/org/chromium/base/Log.java",
+      "android/java/src/org/chromium/base/MemoryPressureListener.java",
+      "android/java/src/org/chromium/base/ObserverList.java",
+      "android/java/src/org/chromium/base/PackageUtils.java",
+      "android/java/src/org/chromium/base/PathService.java",
+      "android/java/src/org/chromium/base/PathUtils.java",
+      "android/java/src/org/chromium/base/PerfTraceEvent.java",
+      "android/java/src/org/chromium/base/PowerMonitor.java",
+      "android/java/src/org/chromium/base/PowerStatusReceiver.java",
+      "android/java/src/org/chromium/base/ResourceExtractor.java",
+      "android/java/src/org/chromium/base/SecureRandomInitializer.java",
+      "android/java/src/org/chromium/base/StreamUtil.java",
+      "android/java/src/org/chromium/base/SysUtils.java",
+      "android/java/src/org/chromium/base/SystemMessageHandler.java",
+      "android/java/src/org/chromium/base/ThreadUtils.java",
+      "android/java/src/org/chromium/base/TraceEvent.java",
+      "android/java/src/org/chromium/base/VisibleForTesting.java",
+      "android/java/src/org/chromium/base/annotations/AccessedByNative.java",
+      "android/java/src/org/chromium/base/annotations/CalledByNative.java",
+      "android/java/src/org/chromium/base/annotations/CalledByNativeUnchecked.java",
+      "android/java/src/org/chromium/base/annotations/JNIAdditionalImport.java",
+      "android/java/src/org/chromium/base/annotations/JNINamespace.java",
+      "android/java/src/org/chromium/base/annotations/MainDex.java",
+      "android/java/src/org/chromium/base/annotations/NativeCall.java",
+      "android/java/src/org/chromium/base/annotations/NativeClassQualifiedName.java",
+      "android/java/src/org/chromium/base/annotations/RemovableInRelease.java",
+      "android/java/src/org/chromium/base/annotations/SuppressFBWarnings.java",
+      "android/java/src/org/chromium/base/annotations/UsedByReflection.java",
+      "android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
+      "android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
+      "android/java/src/org/chromium/base/library_loader/Linker.java",
+      "android/java/src/org/chromium/base/library_loader/LoaderErrors.java",
+      "android/java/src/org/chromium/base/library_loader/ModernLinker.java",
+      "android/java/src/org/chromium/base/library_loader/NativeLibraryPreloader.java",
+      "android/java/src/org/chromium/base/library_loader/ProcessInitException.java",
+      "android/java/src/org/chromium/base/metrics/RecordHistogram.java",
+      "android/java/src/org/chromium/base/metrics/RecordUserAction.java",
+      "android/java/src/org/chromium/base/multidex/ChromiumMultiDexInstaller.java",
+    ]
 
     # New versions of BuildConfig.java and NativeLibraries.java
     # (with the actual correct values) will be created when creating an apk.
@@ -2138,7 +2262,14 @@
       ":base_java",
       ":base_java_test_support",
     ]
-    DEPRECATED_java_in_dir = "android/javatests/src"
+    java_files = [
+      "android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
+      "android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
+      "android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
+      "android/javatests/src/org/chromium/base/CommandLineTest.java",
+      "android/javatests/src/org/chromium/base/ObserverListTest.java",
+      "android/javatests/src/org/chromium/base/metrics/RecordHistogramTest.java",
+    ]
   }
 
   # GYP: //base.gyp:base_java_test_support
@@ -2147,7 +2278,40 @@
       ":base_java",
       "//testing/android/reporter:reporter_java",
     ]
-    DEPRECATED_java_in_dir = "test/android/javatests/src"
+    java_files = [
+      "test/android/javatests/src/org/chromium/base/test/BaseActivityInstrumentationTestCase.java",
+      "test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
+      "test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
+      "test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
+      "test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisableIf.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/DisabledTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/EnormousTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Feature.java",
+      "test/android/javatests/src/org/chromium/base/test/util/FlakyTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/InMemorySharedPreferences.java",
+      "test/android/javatests/src/org/chromium/base/test/util/InstrumentationUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/IntegrationTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Manual.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MetricsUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevel.java",
+      "test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/PerfTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/Restriction.java",
+      "test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java",
+      "test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TestThread.java",
+      "test/android/javatests/src/org/chromium/base/test/util/TimeoutScale.java",
+      "test/android/javatests/src/org/chromium/base/test/util/UrlUtils.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/BaseParameter.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/Parameter.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/Parameterizable.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/ParameterizedTest.java",
+      "test/android/javatests/src/org/chromium/base/test/util/parameter/parameters/MethodParameter.java",
+    ]
   }
 
   # TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index 887d544..96ccad2 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -6,10 +6,6 @@
 import("//build/config/allocator.gni")
 import("//build/config/compiler/compiler.gni")
 
-if (is_win) {
-  import("//build/config/win/visual_studio_version.gni")
-}
-
 declare_args() {
   # Provide a way to force disable debugallocation in Debug builds,
   # e.g. for profiling (it's more rare to profile Debug builds,
@@ -17,22 +13,22 @@
   enable_debugallocation = is_debug
 }
 
+# Allocator shim is only enabled for Release static builds.
+win_use_allocator_shim = is_win && !is_component_build && !is_debug
+
 # This "allocator" meta-target will forward to the default allocator according
 # to the build settings.
 group("allocator") {
-  if (!is_nacl) {
-    deps = []
+  public_deps = []
+  deps = []
 
-    if (use_allocator == "tcmalloc") {
-      deps += [ ":tcmalloc" ]
-    }
+  if (use_allocator == "tcmalloc") {
+    deps += [ ":tcmalloc" ]
+  }
 
-    # This condition expresses the win_use_allocator_shim in the GYP build.
-    if (is_win && !is_component_build && visual_studio_version != "2015") {
-      deps += [ ":allocator_shim" ]
-      all_dependent_configs = [ ":nocmt" ]
-    }
-  }  # !is_nacl
+  if (win_use_allocator_shim) {
+    public_deps += [ ":allocator_shim" ]
+  }
 }
 
 # This config defines ALLOCATOR_SHIM in the same conditions that the allocator
@@ -42,7 +38,7 @@
 # assumes that the library using it will eventually be linked with
 # //base/allocator in the default way. Clean this up and delete this.
 config("allocator_shim_define") {
-  if (is_win && !is_component_build && visual_studio_version != "2015") {
+  if (win_use_allocator_shim) {
     defines = [ "ALLOCATOR_SHIM" ]
   }
 }
@@ -78,46 +74,15 @@
   }
 }
 
-# This config and libc modification are only used on Windows.
-if (is_win) {
-  config("nocmt") {
-    ldflags = [
-      "/NODEFAULTLIB:libcmt",
-      "/NODEFAULTLIB:libcmtd",
+# This config is only used on Windows static release builds for the
+# allocator shim.
+if (win_use_allocator_shim) {
+  source_set("allocator_shim") {
+    sources = [
+      "allocator_shim_win.cc",
+      "allocator_shim_win.h",
     ]
-    libs = [ rebase_path("$target_gen_dir/allocator/libcmt.lib") ]
-  }
-
-  if (!is_component_build && visual_studio_version != "2015") {
-    action("prep_libc") {
-      script = "prep_libc.py"
-      outputs = [
-        "$target_gen_dir/allocator/libcmt.lib",
-      ]
-      args = [
-        visual_studio_path + "/vc/lib",
-        rebase_path("$target_gen_dir/allocator"),
-        current_cpu,
-
-        # The environment file in the build directory. This is required because
-        # the Windows toolchain setup saves the VC paths and such so that
-        # running "mc.exe" will work with the configured toolchain. This file
-        # is in the root build dir.
-        "environment.$current_cpu",
-      ]
-    }
-
-    source_set("allocator_shim") {
-      sources = [
-        "allocator_shim_win.cc",
-      ]
-      configs -= [ "//build/config/compiler:chromium_code" ]
-      configs += [ "//build/config/compiler:no_chromium_code" ]
-
-      deps = [
-        ":prep_libc",
-      ]
-    }
+    configs += [ ":allocator_shim_define" ]
   }
 }
 
@@ -272,6 +237,7 @@
       ]
 
       configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+      configs += [ "//build/config/gcc:symbol_visibility_default" ]
 
       ldflags = [
         # Don't let linker rip this symbol out, otherwise the heap&cpu
@@ -302,9 +268,23 @@
 }
 
 if (use_experimental_allocator_shim) {
+  # Used to shim malloc symbols on Android. see //base/allocator/README.md.
+  config("wrap_malloc_symbols") {
+    ldflags = [
+      "-Wl,-wrap,calloc",
+      "-Wl,-wrap,free",
+      "-Wl,-wrap,malloc",
+      "-Wl,-wrap,memalign",
+      "-Wl,-wrap,posix_memalign",
+      "-Wl,-wrap,pvalloc",
+      "-Wl,-wrap,realloc",
+      "-Wl,-wrap,valloc",
+    ]
+  }
+
   source_set("unified_allocator_shim") {
     # TODO(primiano): support other platforms, currently this works only on
-    # Linux/CrOS. http://crbug.com/550886 .
+    # Linux/CrOS/Android. http://crbug.com/550886 .
     configs += [ "//base:base_implementation" ]  # for BASE_EXPORT
     visibility = [ "//base:base" ]
     sources = [
@@ -324,6 +304,12 @@
       ]
     } else if (is_linux && use_allocator == "none") {
       sources += [ "allocator_shim_default_dispatch_to_glibc.cc" ]
+    } else if (is_android && use_allocator == "none") {
+      sources += [
+        "allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
+        "allocator_shim_override_linker_wrapped_symbols.h",
+      ]
+      all_dependent_configs = [ ":wrap_malloc_symbols" ]
     }
   }
 }
diff --git a/base/allocator/README.md b/base/allocator/README.md
index 968e6b0..164df51 100644
--- a/base/allocator/README.md
+++ b/base/allocator/README.md
@@ -85,6 +85,93 @@
 time we no longer need any forked files.
 
 
+Unified allocator shim
+----------------------
+On most platforms, Chrome overrides the malloc / operator new symbols (and
+corresponding free / delete and other variants). This is to enforce security
+checks and lately to enable the
+[memory-infra heap profiler][url-memory-infra-heap-profiler].  
+Historically each platform had its special logic for defining the allocator
+symbols in different places of the codebase. The unified allocator shim is
+a project aimed to unify the symbol definition and allocator routing logic in
+a central place.
+
+ - Full documentation: [Allocator shim design doc][url-allocator-shim].
+ - Current state: Available and enabled by default on Linux, CrOS and Android.
+ - Tracking bug: [https://crbug.com/550886][crbug.com/550886].
+ - Build-time flag: `use_experimental_allocator_shim`.
+
+**Overview of the unified allocator shim**  
+The allocator shim consists of three stages:
+```
++-------------------------+    +-----------------------+    +----------------+
+|     malloc & friends    | -> |       shim layer      | -> |   Routing to   |
+|    symbols definition   |    |     implementation    |    |    allocator   |
++-------------------------+    +-----------------------+    +----------------+
+| - libc symbols (malloc, |    | - Security checks     |    | - tcmalloc     |
+|   calloc, free, ...)    |    | - Chain of dispatchers|    | - glibc        |
+| - C++ symbols (operator |    |   that can intercept  |    | - Android      |
+|   new, delete, ...)     |    |   and override        |    |   bionic       |
+| - glibc weak symbols    |    |   allocations         |    | - WinHeap      |
+|   (__libc_malloc, ...)  |    +-----------------------+    +----------------+
++-------------------------+
+```
+
+**1. malloc symbols definition**  
+This stage takes care of overriding the symbols `malloc`, `free`,
+`operator new`, `operator delete` and friends and routing those calls inside the
+allocator shim (next point).
+This is taken care of by the headers in `allocator_shim_override_*`.
+
+*On Linux/CrOS*: the allocator symbols are defined as exported global symbols
+in `allocator_shim_override_libc_symbols.h` (for `malloc`, `free` and friends)
+and in `allocator_shim_override_cpp_symbols.h` (for `operator new`,
+`operator delete` and friends).
+This enables proper interposition of malloc symbols referenced by the main
+executable and any third party libraries. Symbol resolution on Linux is a breadth first search that starts from the root link unit, that is the executable
+(see EXECUTABLE AND LINKABLE FORMAT (ELF) - Portable Formats Specification).
+Additionally, when tcmalloc is the default allocator, some extra glibc symbols
+are also defined in `allocator_shim_override_glibc_weak_symbols.h`, for subtle
+reasons explained in that file.
+The Linux/CrOS shim was introduced by
+[crrev.com/1675143004](https://crrev.com/1675143004).
+
+*On Android*: load-time symbol interposition (unlike the Linux/CrOS case) is not
+possible. This is because Android processes are `fork()`-ed from the Android
+zygote, which pre-loads libc.so and only later native code gets loaded via
+`dlopen()` (symbols from `dlopen()`-ed libraries get a different resolution
+scope).
+In this case the approach relies instead on wrapping the malloc symbols at
+link time (i.e. during the build), via the `-Wl,-wrap,malloc` linker flag.
+The use of this wrapping flag causes:
+ - All references to allocator symbols in the Chrome codebase to be rewritten as
+   references to `__wrap_malloc` and friends. The `__wrap_malloc` symbols are
+   defined in the `allocator_shim_override_linker_wrapped_symbols.h` and
+   route allocator calls inside the shim layer.
+ - References to the original `malloc` symbols (typically defined by the
+   system's libc.so) remain accessible via the special `__real_malloc` and
+   friends symbols (which will be relocated, at load time, against `malloc`).
+
+In summary, this approach is transparent to the dynamic loader, which still sees
+undefined symbol references to malloc symbols.
+These symbols will be resolved against libc.so as usual.
+More details in [crrev.com/1719433002](https://crrev.com/1719433002).
+
+**2. Shim layer implementation**  
+This stage contains the actual shim implementation. This consists of:
+- A singly linked list of dispatchers (structs with function pointers to `malloc`-like functions). Dispatchers can be dynamically inserted at runtime
+(using the `InsertAllocatorDispatch` API). They can intercept and override
+allocator calls.
+- The security checks (suicide on malloc-failure via `std::new_handler`, etc.).
+This happens inside `allocator_shim.cc`.
+
+**3. Final allocator routing**  
+The final element of the aforementioned dispatcher chain is statically defined
+at build time and ultimately routes the allocator calls to the actual allocator
+(as described in the *Background* section above). This is taken care of by the
+headers in `allocator_shim_default_dispatch_to_*` files.
+
+
 Appendixes
 ----------
 **How does the Windows shim layer replace the malloc symbols?**  
@@ -99,8 +186,11 @@
 
 Related links
 -------------
-- [Allocator Cleanup Doc - Jan 2016][url-allocator-cleanup]
+- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
+- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
 - [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
 - [Memory-Infra: Tools to profile memory usage in Chrome](components/tracing/docs/memory_infra.md)
 
 [url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
+[url-memory-infra-heap-profiler]: components/tracing/docs/heap_profiler.md
+[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
diff --git a/base/allocator/allocator.gyp b/base/allocator/allocator.gyp
index d906eea..3844c08 100644
--- a/base/allocator/allocator.gyp
+++ b/base/allocator/allocator.gyp
@@ -49,25 +49,13 @@
               'AdditionalOptions': ['/ignore:4006'],
             },
           },
-          'dependencies': [
-            'libcmt',
-          ],
           'include_dirs': [
             '../..',
           ],
           'sources': [
             'allocator_shim_win.cc',
+            'allocator_shim_win.h',
           ],
-          'link_settings': {
-            'msvs_settings': {
-              'VCLinkerTool': {
-                'IgnoreDefaultLibraryNames': ['libcmtd.lib', 'libcmt.lib'],
-                'AdditionalDependencies': [
-                  '<(SHARED_INTERMEDIATE_DIR)/allocator/libcmt.lib'
-                ],
-              },
-            },
-          },
           'configurations': {
             'Debug_Base': {
               'msvs_settings': {
@@ -393,33 +381,6 @@
     },  # 'allocator_features' target.
   ],  # targets.
   'conditions': [
-    ['OS=="win" and win_use_allocator_shim==1', {
-      'targets': [
-        {
-          'target_name': 'libcmt',
-          'toolsets': ['host', 'target'],
-          'type': 'none',
-          'actions': [
-            {
-              'action_name': 'libcmt',
-              'inputs': [
-                'prep_libc.py',
-              ],
-              'outputs': [
-                '<(SHARED_INTERMEDIATE_DIR)/allocator/libcmt.lib',
-              ],
-              'action': [
-                'python',
-                'prep_libc.py',
-                '$(VCInstallDir)lib',
-                '<(SHARED_INTERMEDIATE_DIR)/allocator',
-                '<(target_arch)',
-              ],
-            },
-          ],
-        },
-      ],
-    }],
     ['use_experimental_allocator_shim==1', {
       'targets': [
         {
@@ -445,11 +406,32 @@
                 'allocator_shim_override_glibc_weak_symbols.h',
               ],
             }],
-            ['OS=="linux" and use_allocator=="none"', {
+            ['use_allocator=="none" and (OS=="linux" or (OS=="android" and _toolset == "host" and host_os == "linux"))', {
               'sources': [
                 'allocator_shim_default_dispatch_to_glibc.cc',
               ],
             }],
+            ['OS=="android" and _toolset == "target"', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc',
+                'allocator_shim_override_linker_wrapped_symbols.h',
+              ],
+              # On Android all references to malloc & friends symbols are
+              # rewritten, at link time, and routed to the shim.
+              # See //base/allocator/README.md.
+              'all_dependent_settings': {
+                'ldflags': [
+                  '-Wl,-wrap,calloc',
+                  '-Wl,-wrap,free',
+                  '-Wl,-wrap,malloc',
+                  '-Wl,-wrap,memalign',
+                  '-Wl,-wrap,posix_memalign',
+                  '-Wl,-wrap,pvalloc',
+                  '-Wl,-wrap,realloc',
+                  '-Wl,-wrap,valloc',
+                ],
+              },
+            }],
           ]
         },  # 'unified_allocator_shim' target.
       ],
diff --git a/base/allocator/allocator_shim.cc b/base/allocator/allocator_shim.cc
new file mode 100644
index 0000000..09ed45f
--- /dev/null
+++ b/base/allocator/allocator_shim.cc
@@ -0,0 +1,260 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#include <new>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+
+// No calls to malloc / new in this file. They would cause re-entrancy of
+// the shim, which is hard to deal with. Keep this code as simple as possible
+// and don't use any external C++ object here, not even //base ones. Even if
+// they are safe to use today, in future they might be refactored.
+
+namespace {
+
+using namespace base;
+
+subtle::AtomicWord g_chain_head = reinterpret_cast<subtle::AtomicWord>(
+    &allocator::AllocatorDispatch::default_dispatch);
+
+bool g_call_new_handler_on_malloc_failure = false;
+subtle::Atomic32 g_new_handler_lock = 0;
+
+// In theory this should be just base::ThreadChecker. But we can't afford
+// the luxury of a LazyInstance<ThreadChecker> here as it would cause a new().
+bool CalledOnValidThread() {
+  using subtle::Atomic32;
+  const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
+  static Atomic32 g_tid = kInvalidTID;
+  Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
+  Atomic32 prev_tid =
+      subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
+  return prev_tid == kInvalidTID || prev_tid == cur_tid;
+}
+
+inline size_t GetPageSize() {
+  static size_t pagesize = 0;
+  if (!pagesize)
+    pagesize = sysconf(_SC_PAGESIZE);
+  return pagesize;
+}
+
+// Calls the std::new handler thread-safely. Returns true if a new_handler was
+// set and called, false if no new_handler was set.
+bool CallNewHandler() {
+  // TODO(primiano): C++11 has introduced ::get_new_handler() which is supposed
+  // to be thread safe and would avoid the spinlock boilerplate here. However
+  // it doesn't seem to be available in the Linux chroot headers yet.
+  std::new_handler nh;
+  {
+    while (subtle::Acquire_CompareAndSwap(&g_new_handler_lock, 0, 1))
+      PlatformThread::YieldCurrentThread();
+    nh = std::set_new_handler(0);
+    ignore_result(std::set_new_handler(nh));
+    subtle::Release_Store(&g_new_handler_lock, 0);
+  }
+  if (!nh)
+    return false;
+  (*nh)();
+  // Assume the new_handler will abort if it fails. Exceptions are disabled and
+  // we don't support the case of a new_handler throwing std::bad_alloc.
+  return true;
+}
+
+inline const allocator::AllocatorDispatch* GetChainHead() {
+  // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
+  // Unfortunately due to that bug NoBarrier_Load() is mistakenly fully
+// barriered on Linux+Clang, and that causes visible perf regressions.
+  return reinterpret_cast<const allocator::AllocatorDispatch*>(
+#if defined(OS_LINUX) && defined(__clang__)
+      *static_cast<const volatile subtle::AtomicWord*>(&g_chain_head)
+#else
+      subtle::NoBarrier_Load(&g_chain_head)
+#endif
+  );
+}
+
+}  // namespace
+
+namespace base {
+namespace allocator {
+
+void SetCallNewHandlerOnMallocFailure(bool value) {
+  g_call_new_handler_on_malloc_failure = value;
+}
+
+void* UncheckedAlloc(size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->alloc_function(chain_head, size);
+}
+
+void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
+  // Ensure this is always called on the same thread.
+  DCHECK(CalledOnValidThread());
+
+  dispatch->next = GetChainHead();
+
+  // This function does not guarantee to be thread-safe w.r.t. concurrent
+  // insertions, but still has to guarantee that all the threads always
+  // see a consistent chain, hence the MemoryBarrier() below.
+  // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
+  // we don't really want this to be a release-store with a corresponding
+  // acquire-load during malloc().
+  subtle::MemoryBarrier();
+
+  subtle::NoBarrier_Store(&g_chain_head,
+                          reinterpret_cast<subtle::AtomicWord>(dispatch));
+}
+
+void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
+  DCHECK(CalledOnValidThread());
+  DCHECK_EQ(GetChainHead(), dispatch);
+  subtle::NoBarrier_Store(&g_chain_head,
+                          reinterpret_cast<subtle::AtomicWord>(dispatch->next));
+}
+
+}  // namespace allocator
+}  // namespace base
+
+// The Shim* functions below are the entry-points into the shim-layer and
+// are supposed to be invoked / aliased by the allocator_shim_override_*
+// headers to route the malloc / new symbols through the shim layer.
+extern "C" {
+
+// The general pattern for allocations is:
+// - Try to allocate; if it succeeds, return the pointer.
+// - If the allocation failed:
+//   - Call the std::new_handler if it was a C++ allocation.
+//   - Call the std::new_handler if it was a malloc() (or calloc() or similar)
+//     AND SetCallNewHandlerOnMallocFailure(true).
+//   - If the std::new_handler is NOT set just return nullptr.
+//   - If the std::new_handler is set:
+//     - Assume it will abort() if it fails (very likely the new_handler will
+//       just suicide printing a message).
+//     - Assume it did succeed if it returns, in which case reattempt the alloc.
+
+void* ShimCppNew(size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_function(chain_head, size);
+  } while (!ptr && CallNewHandler());
+  return ptr;
+}
+
+void ShimCppDelete(void* address) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, address);
+}
+
+void* ShimMalloc(size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_function(chain_head, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+  return ptr;
+}
+
+void* ShimCalloc(size_t n, size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_zero_initialized_function(chain_head, n, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+  return ptr;
+}
+
+void* ShimRealloc(void* address, size_t size) {
+  // realloc(size == 0) means free() and might return a nullptr. We should
+  // not call the std::new_handler in that case, though.
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->realloc_function(chain_head, address, size);
+  } while (!ptr && size && g_call_new_handler_on_malloc_failure &&
+           CallNewHandler());
+  return ptr;
+}
+
+void* ShimMemalign(size_t alignment, size_t size) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  void* ptr;
+  do {
+    ptr = chain_head->alloc_aligned_function(chain_head, alignment, size);
+  } while (!ptr && g_call_new_handler_on_malloc_failure && CallNewHandler());
+  return ptr;
+}
+
+int ShimPosixMemalign(void** res, size_t alignment, size_t size) {
+  // posix_memalign is supposed to check the arguments. See tc_posix_memalign()
+  // in tc_malloc.cc.
+  if (((alignment % sizeof(void*)) != 0) ||
+      ((alignment & (alignment - 1)) != 0) || (alignment == 0)) {
+    return EINVAL;
+  }
+  void* ptr = ShimMemalign(alignment, size);
+  *res = ptr;
+  return ptr ? 0 : ENOMEM;
+}
+
+void* ShimValloc(size_t size) {
+  return ShimMemalign(GetPageSize(), size);
+}
+
+void* ShimPvalloc(size_t size) {
+  // pvalloc(0) should allocate one page, according to its man page.
+  if (size == 0) {
+    size = GetPageSize();
+  } else {
+    size = (size + GetPageSize() - 1) & ~(GetPageSize() - 1);
+  }
+  return ShimMemalign(GetPageSize(), size);
+}
+
+void ShimFree(void* address) {
+  const allocator::AllocatorDispatch* const chain_head = GetChainHead();
+  return chain_head->free_function(chain_head, address);
+}
+
+}  // extern "C"
+
+// Cpp symbols (new / delete) should always be routed through the shim layer.
+#include "base/allocator/allocator_shim_override_cpp_symbols.h"
+
+// Android does not support symbol interposition. The way malloc symbols are
+// intercepted on Android is by using link-time -wrap flags.
+#if !defined(OS_ANDROID) && !defined(ANDROID)
+// Ditto for plain malloc() / calloc() / free() etc. symbols.
+#include "base/allocator/allocator_shim_override_libc_symbols.h"
+#else
+#include "base/allocator/allocator_shim_override_linker_wrapped_symbols.h"
+#endif
+
+// In the case of tcmalloc we also want to plumb into the glibc hooks
+// so that allocations made inside glibc itself (e.g., strdup()) do not get
+// accidentally performed on the glibc heap instead of the tcmalloc one.
+#if defined(USE_TCMALLOC)
+#include "base/allocator/allocator_shim_override_glibc_weak_symbols.h"
+#endif
+
+// Cross-checks.
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#error The allocator shim should not be compiled when building for memory tools.
+#endif
+
+#if (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
+    (defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS)
+#error This code cannot be used when exceptions are turned on.
+#endif
diff --git a/base/allocator/allocator_shim.h b/base/allocator/allocator_shim.h
new file mode 100644
index 0000000..f1a1e3d
--- /dev/null
+++ b/base/allocator/allocator_shim.h
@@ -0,0 +1,96 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace allocator {
+
+// Allocator Shim API. Allows to:
+//  - Configure the behavior of the allocator (what to do on OOM failures).
+//  - Install new hooks (AllocatorDispatch) in the allocator chain.
+
+// When this shim layer is enabled, the route of an allocation is as follows:
+//
+// [allocator_shim_override_*.h] Intercept malloc() / operator new calls:
+//   The override_* headers define the symbols required to intercept calls to
+//   malloc() and operator new (if not overridden by specific C++ classes).
+//
+// [allocator_shim.cc] Routing allocation calls to the shim:
+//   The headers above route the calls to the internal ShimMalloc(), ShimFree(),
+//   ShimCppNew() etc. methods defined in allocator_shim.cc.
+//   These methods will: (1) forward the allocation call to the front of the
+//   AllocatorDispatch chain. (2) perform security hardenings (e.g., might
+//   call std::new_handler on OOM failure).
+//
+// [allocator_shim_default_dispatch_to_*.cc] The AllocatorDispatch chain:
+//   It is a singly linked list where each element is a struct with function
+//   pointers (|malloc_function|, |free_function|, etc). Normally the chain
+//   consists of a single AllocatorDispatch element, herein called
+//   the "default dispatch", which is statically defined at build time and
+//   ultimately routes the calls to the actual allocator defined by the build
+//   config (tcmalloc, glibc, ...).
+//
+// It is possible to dynamically insert further AllocatorDispatch stages
+// to the front of the chain, for debugging / profiling purposes.
+//
+// All the functions must be thread safe. The shim does not enforce any
+// serialization. This is to route to thread-aware allocators (e.g., tcmalloc)
+// without introducing unnecessary perf hits.
+
+struct AllocatorDispatch {
+  using AllocFn = void*(const AllocatorDispatch* self, size_t size);
+  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self,
+                                       size_t n,
+                                       size_t size);
+  using AllocAlignedFn = void*(const AllocatorDispatch* self,
+                               size_t alignment,
+                               size_t size);
+  using ReallocFn = void*(const AllocatorDispatch* self,
+                          void* address,
+                          size_t size);
+  using FreeFn = void(const AllocatorDispatch* self, void* address);
+
+  AllocFn* const alloc_function;
+  AllocZeroInitializedFn* const alloc_zero_initialized_function;
+  AllocAlignedFn* const alloc_aligned_function;
+  ReallocFn* const realloc_function;
+  FreeFn* const free_function;
+
+  const AllocatorDispatch* next;
+
+  // |default_dispatch| is statically defined by one (and only one) of the
+  // allocator_shim_default_dispatch_to_*.cc files, depending on the build
+  // configuration.
+  static const AllocatorDispatch default_dispatch;
+};
+
+// When true, makes malloc behave like new w.r.t. calling the new_handler if
+// the allocation fails (see set_new_mode() in Windows).
+BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
+
+// Allocates |size| bytes or returns nullptr. It does NOT call the new_handler,
+// regardless of SetCallNewHandlerOnMallocFailure().
+BASE_EXPORT void* UncheckedAlloc(size_t size);
+
+// Inserts |dispatch| in front of the allocator chain. This method is NOT
+// thread-safe w.r.t. concurrent invocations of InsertAllocatorDispatch().
+// The callers have the responsibility of linearizing the changes to the chain
+// (or, more likely, of always calling these on the same thread).
+BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
+
+// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
+// removal of arbitrary elements from a singly linked list would require a lock
+// in malloc(), which we really don't want.
+BASE_EXPORT void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch);
+
+}  // namespace allocator
+}  // namespace base
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_H_
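
For orientation, here is a minimal sketch of how a client might layer a debugging hook on the dispatch chain described above. Only AllocatorDispatch and InsertAllocatorDispatch() come from the header in this patch; the counting logic and every other name below are illustrative assumptions, not part of this change.

  // counting_hook.cc -- hypothetical example, not part of this patch.
  #include <atomic>
  #include <stddef.h>

  #include "base/allocator/allocator_shim.h"

  namespace {

  using base::allocator::AllocatorDispatch;

  std::atomic<size_t> g_alloc_count(0);

  void* HookAlloc(const AllocatorDispatch* self, size_t size) {
    g_alloc_count.fetch_add(1, std::memory_order_relaxed);
    // Forward to the next stage; the tail of the chain is the statically
    // defined default_dispatch, which routes to the real allocator.
    return self->next->alloc_function(self->next, size);
  }

  void* HookAllocZeroInit(const AllocatorDispatch* self, size_t n, size_t size) {
    g_alloc_count.fetch_add(1, std::memory_order_relaxed);
    return self->next->alloc_zero_initialized_function(self->next, n, size);
  }

  void* HookAllocAligned(const AllocatorDispatch* self,
                         size_t alignment,
                         size_t size) {
    g_alloc_count.fetch_add(1, std::memory_order_relaxed);
    return self->next->alloc_aligned_function(self->next, alignment, size);
  }

  void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
    return self->next->realloc_function(self->next, address, size);
  }

  void HookFree(const AllocatorDispatch* self, void* address) {
    self->next->free_function(self->next, address);
  }

  AllocatorDispatch g_counting_dispatch = {
      &HookAlloc,         /* alloc_function */
      &HookAllocZeroInit, /* alloc_zero_initialized_function */
      &HookAllocAligned,  /* alloc_aligned_function */
      &HookRealloc,       /* realloc_function */
      &HookFree,          /* free_function */
      nullptr,            /* next: wired up by InsertAllocatorDispatch() */
  };

  }  // namespace

  void InstallCountingHook() {
    // Per the header comment, callers must serialize chain mutations.
    base::allocator::InsertAllocatorDispatch(&g_counting_dispatch);
  }
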
diff --git a/base/allocator/allocator_shim_default_dispatch_to_glibc.cc b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
new file mode 100644
index 0000000..02facba
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_glibc.cc
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to libc functions.
+// The code here is strongly inspired by tcmalloc's libc_override_glibc.h.
+
+extern "C" {
+void* __libc_malloc(size_t size);
+void* __libc_calloc(size_t n, size_t size);
+void* __libc_realloc(void* address, size_t size);
+void* __libc_memalign(size_t alignment, size_t size);
+void __libc_free(void* ptr);
+}  // extern "C"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* GlibcMalloc(const AllocatorDispatch*, size_t size) {
+  return __libc_malloc(size);
+}
+
+void* GlibcCalloc(const AllocatorDispatch*, size_t n, size_t size) {
+  return __libc_calloc(n, size);
+}
+
+void* GlibcRealloc(const AllocatorDispatch*, void* address, size_t size) {
+  return __libc_realloc(address, size);
+}
+
+void* GlibcMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
+  return __libc_memalign(alignment, size);
+}
+
+void GlibcFree(const AllocatorDispatch*, void* address) {
+  __libc_free(address);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &GlibcMalloc,   /* alloc_function */
+    &GlibcCalloc,   /* alloc_zero_initialized_function */
+    &GlibcMemalign, /* alloc_aligned_function */
+    &GlibcRealloc,  /* realloc_function */
+    &GlibcFree,     /* free_function */
+    nullptr,        /* next */
+};
diff --git a/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
new file mode 100644
index 0000000..7955cb7
--- /dev/null
+++ b/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+
+// This translation unit defines a default dispatch for the allocator shim which
+// routes allocations to the original libc functions when using the link-time
+// -Wl,-wrap,malloc approach (see README.md).
+// The __real_X functions here are special symbols that the linker will relocate
+// against the real "X" undefined symbol, so that __real_malloc becomes the
+// equivalent of what an undefined malloc symbol reference would have been.
+// This is the counterpart of allocator_shim_override_linker_wrapped_symbols.h,
+// which routes the __wrap_X functions into the shim.
+
+extern "C" {
+void* __real_malloc(size_t);
+void* __real_calloc(size_t, size_t);
+void* __real_realloc(void*, size_t);
+void* __real_memalign(size_t, size_t);
+void __real_free(void*);
+}  // extern "C"
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+void* RealMalloc(const AllocatorDispatch*, size_t size) {
+  return __real_malloc(size);
+}
+
+void* RealCalloc(const AllocatorDispatch*, size_t n, size_t size) {
+  return __real_calloc(n, size);
+}
+
+void* RealRealloc(const AllocatorDispatch*, void* address, size_t size) {
+  return __real_realloc(address, size);
+}
+
+void* RealMemalign(const AllocatorDispatch*, size_t alignment, size_t size) {
+  return __real_memalign(alignment, size);
+}
+
+void RealFree(const AllocatorDispatch*, void* address) {
+  __real_free(address);
+}
+
+}  // namespace
+
+const AllocatorDispatch AllocatorDispatch::default_dispatch = {
+    &RealMalloc,   /* alloc_function */
+    &RealCalloc,   /* alloc_zero_initialized_function */
+    &RealMemalign, /* alloc_aligned_function */
+    &RealRealloc,  /* realloc_function */
+    &RealFree,     /* free_function */
+    nullptr,       /* next */
+};
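
The -Wl,-wrap machinery this translation unit depends on can be seen in isolation with a small standalone program. The file below is entirely hypothetical and unrelated to libchrome; it only demonstrates that the linker rewrites references to malloc into __wrap_malloc and resolves __real_malloc to the original symbol.

  // wrap_demo.cc -- hypothetical demo. Build: g++ wrap_demo.cc -Wl,-wrap,malloc
  #include <cstdio>
  #include <cstdlib>

  // Resolved by the linker to the real malloc.
  extern "C" void* __real_malloc(std::size_t size);

  // Every reference to malloc emitted from this link unit is redirected here.
  extern "C" void* __wrap_malloc(std::size_t size) {
    std::printf("intercepted malloc(%zu)\n", size);
    return __real_malloc(size);
  }

  int main() {
    void* p = std::malloc(32);  // Lands in __wrap_malloc first.
    std::free(p);
    return 0;
  }
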
diff --git a/base/allocator/allocator_shim_internals.h b/base/allocator/allocator_shim_internals.h
new file mode 100644
index 0000000..fc3624c
--- /dev/null
+++ b/base/allocator/allocator_shim_internals.h
@@ -0,0 +1,27 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
+
+#if defined(__GNUC__)
+
+#include <sys/cdefs.h>  // for __THROW
+
+#ifndef __THROW  // Not a glibc system
+#ifdef _NOEXCEPT  // LLVM libc++ uses noexcept instead
+#define __THROW _NOEXCEPT
+#else
+#define __THROW
+#endif  // !_NOEXCEPT
+#endif
+
+// Shim layer symbols need to be ALWAYS exported, regardless of component build.
+#define SHIM_ALWAYS_EXPORT __attribute__((visibility("default")))
+
+#define SHIM_ALIAS_SYMBOL(fn) __attribute__((alias(#fn)))
+
+#endif  // __GNUC__
+
+#endif  // BASE_ALLOCATOR_ALLOCATOR_SHIM_INTERNALS_H_
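
As a quick illustration of what SHIM_ALIAS_SYMBOL(fn) expands to, the hypothetical snippet below shows the GCC/Clang alias attribute in isolation; the names are invented, but this is the mechanism the override_* headers further down use to alias malloc() and friends to the Shim* entry points.

  // alias_demo.cc -- hypothetical; the alias target must be defined in the
  // same translation unit.
  #include <cstdio>

  extern "C" int shim_impl(int x) {
    return x + 1;
  }

  // public_name gets no body of its own; its symbol is emitted as an alias of
  // shim_impl, which is what SHIM_ALIAS_SYMBOL(shim_impl) would declare.
  extern "C" int public_name(int x) __attribute__((alias("shim_impl")));

  int main() {
    std::printf("%d\n", public_name(41));  // Prints 42 via shim_impl.
    return 0;
  }
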
diff --git a/base/allocator/allocator_shim_override_cpp_symbols.h b/base/allocator/allocator_shim_override_cpp_symbols.h
new file mode 100644
index 0000000..616716f
--- /dev/null
+++ b/base/allocator/allocator_shim_override_cpp_symbols.h
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_CPP_SYMBOLS_H_
+
+// Alias the default new/delete C++ symbols to the shim entry points.
+// This file is strongly inspired by tcmalloc's libc_override_redefine.h.
+
+#include <new>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+SHIM_ALWAYS_EXPORT void* operator new(size_t size)
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size)
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void* operator new(size_t size,
+                                      const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void* operator new[](size_t size,
+                                        const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppNew);
+
+SHIM_ALWAYS_EXPORT void operator delete(void* p, const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
+
+SHIM_ALWAYS_EXPORT void operator delete[](void* p,
+                                          const std::nothrow_t&) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCppDelete);
diff --git a/base/allocator/allocator_shim_override_libc_symbols.h b/base/allocator/allocator_shim_override_libc_symbols.h
new file mode 100644
index 0000000..37b3b4eb
--- /dev/null
+++ b/base/allocator/allocator_shim_override_libc_symbols.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header's purpose is to SHIM_ALIAS_SYMBOL the libc symbols for malloc/new
+// to the shim layer entry points.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LIBC_SYMBOLS_H_
+
+#include <malloc.h>
+
+#include "base/allocator/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* malloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimMalloc);
+
+SHIM_ALWAYS_EXPORT void free(void* ptr) __THROW
+    SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* realloc(void* ptr, size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimRealloc);
+
+SHIM_ALWAYS_EXPORT void* calloc(size_t n, size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimCalloc);
+
+SHIM_ALWAYS_EXPORT void cfree(void* ptr) __THROW
+    SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* memalign(size_t align, size_t s) __THROW
+    SHIM_ALIAS_SYMBOL(ShimMemalign);
+
+SHIM_ALWAYS_EXPORT void* valloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimValloc);
+
+SHIM_ALWAYS_EXPORT void* pvalloc(size_t size) __THROW
+    SHIM_ALIAS_SYMBOL(ShimPvalloc);
+
+SHIM_ALWAYS_EXPORT int posix_memalign(void** r, size_t a, size_t s) __THROW
+    SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
+
+// The default dispatch translation unit also has to define the following
+// symbols (unless they are ultimately routed to the system symbols):
+//   void malloc_stats(void);
+//   int mallopt(int, int);
+//   struct mallinfo mallinfo(void);
+//   size_t malloc_size(void*);
+//   size_t malloc_usable_size(const void*);
+
+}  // extern "C"
diff --git a/base/allocator/allocator_shim_override_linker_wrapped_symbols.h b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
new file mode 100644
index 0000000..5b85d6e
--- /dev/null
+++ b/base/allocator/allocator_shim_override_linker_wrapped_symbols.h
@@ -0,0 +1,44 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+#error This header is meant to be included only once by allocator_shim.cc
+#endif
+#define BASE_ALLOCATOR_ALLOCATOR_SHIM_OVERRIDE_LINKER_WRAPPED_SYMBOLS_H_
+
+// This header overrides the __wrap_X symbols when using the link-time
+// -Wl,-wrap,malloc shim-layer approach (see README.md).
+// All references to malloc, free, etc. within the linker unit that gets the
+// -wrap linker flags (e.g., libchrome.so) will be rewritten by the linker as
+// references to __wrap_malloc, __wrap_free, etc., which are defined here.
+
+#include "base/allocator/allocator_shim_internals.h"
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void* __wrap_calloc(size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimCalloc);
+
+SHIM_ALWAYS_EXPORT void __wrap_free(void*)
+    SHIM_ALIAS_SYMBOL(ShimFree);
+
+SHIM_ALWAYS_EXPORT void* __wrap_malloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimMalloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_memalign(size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimMemalign);
+
+SHIM_ALWAYS_EXPORT int __wrap_posix_memalign(void**, size_t, size_t)
+    SHIM_ALIAS_SYMBOL(ShimPosixMemalign);
+
+SHIM_ALWAYS_EXPORT void* __wrap_pvalloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimPvalloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_realloc(void*, size_t)
+    SHIM_ALIAS_SYMBOL(ShimRealloc);
+
+SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t)
+    SHIM_ALIAS_SYMBOL(ShimValloc);
+
+}  // extern "C"
diff --git a/base/allocator/features.h b/base/allocator/features.h
new file mode 100644
index 0000000..2963c59
--- /dev/null
+++ b/base/allocator/features.h
@@ -0,0 +1,11 @@
+// Generated by build/write_buildflag_header.py
+// From "allocator_features"
+
+#ifndef BASE_ALLOCATOR_FEATURES_H_
+#define BASE_ALLOCATOR_FEATURES_H_
+
+#include "build/buildflag.h"
+
+#define BUILDFLAG_INTERNAL_USE_EXPERIMENTAL_ALLOCATOR_SHIM() (1)
+
+#endif  // BASE_ALLOCATOR_FEATURES_H_
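
This generated header is meant to be consumed through the BUILDFLAG() macro from build/buildflag.h; a typical guard is sketched below (the function name is an illustrative assumption, not code from this patch).

  // Hypothetical consumer of the generated flag.
  #include "base/allocator/features.h"
  #include "build/buildflag.h"

  #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  #include "base/allocator/allocator_shim.h"
  #endif

  void MaybeInstallAllocatorHooks() {
  #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
    // The shim is compiled in for this configuration (the macro above expands
    // to (1)), so AllocatorDispatch hooks may be inserted here.
  #endif
  }
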
diff --git a/base/allocator/prep_libc.py b/base/allocator/prep_libc.py
deleted file mode 100755
index a88d3bd..0000000
--- a/base/allocator/prep_libc.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# This script takes libcmt.lib for VS2013 and removes the allocation related
-# functions from it.
-#
-# Usage: prep_libc.py <VCLibDir> <OutputDir> <arch> [<environment_file>]
-#
-# VCLibDir is the path where VC is installed, something like:
-#    C:\Program Files\Microsoft Visual Studio 8\VC\lib
-#
-# OutputDir is the directory where the modified libcmt file should be stored.
-# arch is one of: 'ia32', 'x86' or 'x64'. ia32 and x86 are synonyms.
-#
-# If the environment_file argument is set, the environment variables in the
-# given file will be used to execute the VC tools. This file is in the same
-# format as the environment block passed to CreateProcess.
-
-import os
-import shutil
-import subprocess
-import sys
-
-def run(command, env_dict):
-  """Run |command|.  If any lines that match an error condition then
-      terminate.
-
-  The env_dict, will be used for the environment. None can be used to get the
-  default environment."""
-  error = 'cannot find member object'
-  # Need shell=True to search the path in env_dict for the executable.
-  popen = subprocess.Popen(
-      command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
-      env=env_dict)
-  out, _ = popen.communicate()
-  for line in out.splitlines():
-    print line
-    if error and line.find(error) != -1:
-      print 'prep_libc.py: Error stripping object from C runtime.'
-      sys.exit(1)
-
-def main():
-  bindir = 'SELF_X86'
-  objdir = 'INTEL'
-  vs_install_dir = sys.argv[1]
-  outdir = sys.argv[2]
-  if "x64" in sys.argv[3]:
-    bindir = 'SELF_64_amd64'
-    objdir = 'amd64'
-    vs_install_dir = os.path.join(vs_install_dir, 'amd64')
-
-  if len(sys.argv) == 5:
-    env_pairs = open(sys.argv[4]).read()[:-2].split('\0')
-    env_dict = dict([item.split('=', 1) for item in env_pairs])
-  else:
-    env_dict = None  # Use the default environment.
-
-  output_lib = os.path.join(outdir, 'libcmt.lib')
-  shutil.copyfile(os.path.join(vs_install_dir, 'libcmt.lib'), output_lib)
-  shutil.copyfile(os.path.join(vs_install_dir, 'libcmt.pdb'),
-                  os.path.join(outdir, 'libcmt.pdb'))
-  cvspath = 'f:\\binaries\\Intermediate\\vctools\\crt_bld\\' + bindir + \
-      '\\crt\\prebuild\\build\\' + objdir + '\\mt_obj\\nativec\\\\';
-  cppvspath = 'f:\\binaries\\Intermediate\\vctools\\crt_bld\\' + bindir + \
-      '\\crt\\prebuild\\build\\' + objdir + '\\mt_obj\\nativecpp\\\\';
-
-  cobjfiles = ['malloc', 'free', 'realloc', 'heapinit', 'calloc', 'recalloc',
-      'calloc_impl']
-  cppobjfiles = ['new', 'new2', 'delete', 'delete2', 'new_mode', 'newopnt',
-      'newaopnt']
-  for obj in cobjfiles:
-    cmd = ('lib /nologo /ignore:4006,4221 /remove:%s%s.obj %s' %
-           (cvspath, obj, output_lib))
-    run(cmd, env_dict)
-  for obj in cppobjfiles:
-    cmd = ('lib /nologo /ignore:4006,4221 /remove:%s%s.obj %s' %
-           (cppvspath, obj, output_lib))
-    run(cmd, env_dict)
-
-if __name__ == "__main__":
-  sys.exit(main())
diff --git a/base/base.gyp b/base/base.gyp
index d7f3519..86d2331 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -161,6 +161,8 @@
                 'cfgmgr32.lib',
                 'powrprof.lib',
                 'setupapi.lib',
+                'userenv.lib',
+                'winmm.lib',
               ],
             },
           },
@@ -177,18 +179,12 @@
                   'cfgmgr32.lib',
                   'powrprof.lib',
                   'setupapi.lib',
+                  'userenv.lib',
+                  'winmm.lib',
                 ],
               },
             },
           },
-          'copies': [
-            {
-              'destination': '<(PRODUCT_DIR)/',
-              'files': [
-                '../build/win/dbghelp_xp/dbghelp.dll',
-              ],
-            },
-          ],
           'dependencies': [
            'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
           ],
@@ -383,6 +379,7 @@
         'big_endian_unittest.cc',
         'bind_unittest.cc',
         'bind_unittest.nc',
+        'bit_cast_unittest.cc',
         'bits_unittest.cc',
         'build_time_unittest.cc',
         'callback_helpers_unittest.cc',
@@ -500,6 +497,7 @@
         'native_library_unittest.cc',
         'numerics/safe_numerics_unittest.cc',
         'observer_list_unittest.cc',
+        'optional_unittest.cc',
         'os_compat_android_unittest.cc',
         'path_service_unittest.cc',
         'pickle_unittest.cc',
@@ -549,9 +547,20 @@
         'system_monitor/system_monitor_unittest.cc',
         'task/cancelable_task_tracker_unittest.cc',
         'task_runner_util_unittest.cc',
+        'task_scheduler/delayed_task_manager_unittest.cc',
+        'task_scheduler/priority_queue_unittest.cc',
         'task_scheduler/scheduler_lock_unittest.cc',
+        'task_scheduler/scheduler_service_thread_unittest.cc',
+        'task_scheduler/scheduler_thread_pool_impl_unittest.cc',
+        'task_scheduler/scheduler_worker_thread_stack_unittest.cc',
+        'task_scheduler/scheduler_worker_thread_unittest.cc',
         'task_scheduler/sequence_sort_key_unittest.cc',
         'task_scheduler/sequence_unittest.cc',
+        'task_scheduler/task_scheduler_impl_unittest.cc',
+        'task_scheduler/task_tracker_unittest.cc',
+        'task_scheduler/test_task_factory.cc',
+        'task_scheduler/test_task_factory.h',
+        'task_scheduler/test_utils.h',
         'template_util_unittest.cc',
         'test/histogram_tester_unittest.cc',
         'test/test_pending_task_unittest.cc',
@@ -602,6 +611,7 @@
         'win/scoped_variant_unittest.cc',
         'win/shortcut_unittest.cc',
         'win/startup_information_unittest.cc',
+        'win/wait_chain_unittest.cc',
         'win/win_util_unittest.cc',
         'win/windows_version_unittest.cc',
         'win/wrapped_window_proc_unittest.cc',
@@ -887,6 +897,8 @@
         'test/perf_time_logger.h',
         'test/power_monitor_test_base.cc',
         'test/power_monitor_test_base.h',
+        'test/scoped_command_line.cc',
+        'test/scoped_command_line.h',
         'test/scoped_locale.cc',
         'test/scoped_locale.h',
         'test/scoped_path_override.cc',
@@ -914,6 +926,8 @@
         'test/test_io_thread.h',
         'test/test_listener_ios.h',
         'test/test_listener_ios.mm',
+        'test/test_message_loop.cc',
+        'test/test_message_loop.h',
         'test/test_mock_time_task_runner.cc',
         'test/test_mock_time_task_runner.h',
         'test/test_pending_task.cc',
@@ -1144,6 +1158,8 @@
                 'cfgmgr32.lib',
                 'powrprof.lib',
                 'setupapi.lib',
+                'userenv.lib',
+                'winmm.lib',
               ],
             },
           },
@@ -1160,6 +1176,8 @@
                   'cfgmgr32.lib',
                   'powrprof.lib',
                   'setupapi.lib',
+                  'userenv.lib',
+                  'winmm.lib',
                 ],
               },
             },
@@ -1365,6 +1383,7 @@
             'android/java/src/org/chromium/base/ApplicationStatus.java',
             'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
             'android/java/src/org/chromium/base/BuildInfo.java',
+            'android/java/src/org/chromium/base/Callback.java',
             'android/java/src/org/chromium/base/CommandLine.java',
             'android/java/src/org/chromium/base/ContentUriUtils.java',
             'android/java/src/org/chromium/base/ContextUtils.java',
diff --git a/base/base.gypi b/base/base.gypi
index 5d7693f..dc86c59 100644
--- a/base/base.gypi
+++ b/base/base.gypi
@@ -32,6 +32,8 @@
           'android/base_jni_registrar.h',
           'android/build_info.cc',
           'android/build_info.h',
+          'android/callback_android.cc',
+          'android/callback_android.h',
           'android/command_line_android.cc',
           'android/command_line_android.h',
           'android/content_uri_utils.cc',
@@ -240,8 +242,6 @@
           'gtest_prod_util.h',
           'guid.cc',
           'guid.h',
-          'guid_posix.cc',
-          'guid_win.cc',
           'hash.cc',
           'hash.h',
           'id_map.h',
@@ -346,6 +346,7 @@
           'memory/discardable_memory_allocator.h',
           'memory/discardable_shared_memory.cc',
           'memory/discardable_shared_memory.h',
+          'memory/free_deleter.h',
           'memory/linked_ptr.h',
           'memory/manual_constructor.h',
           'memory/memory_pressure_listener.cc',
@@ -366,7 +367,6 @@
           'memory/ref_counted_memory.cc',
           'memory/ref_counted_memory.h',
           'memory/scoped_policy.h',
-          'memory/scoped_ptr.h',
           'memory/scoped_vector.h',
           'memory/shared_memory.h',
           'memory/shared_memory_android.cc',
@@ -445,6 +445,7 @@
           'numerics/safe_math_impl.h',
           'observer_list.h',
           'observer_list_threadsafe.h',
+          'optional.h',
           'os_compat_android.cc',
           'os_compat_android.h',
           'os_compat_nacl.cc',
@@ -625,7 +626,7 @@
           'sys_info_internal.h',
           'sys_info_ios.mm',
           'sys_info_linux.cc',
-          'sys_info_mac.cc',
+          'sys_info_mac.mm',
           'sys_info_openbsd.cc',
           'sys_info_posix.cc',
           'sys_info_win.cc',
@@ -636,15 +637,34 @@
           'task_runner.cc',
           'task_runner.h',
           'task_runner_util.h',
+          'task_scheduler/delayed_task_manager.cc',
+          'task_scheduler/delayed_task_manager.h',
+          'task_scheduler/priority_queue.cc',
+          'task_scheduler/priority_queue.h',
           'task_scheduler/scheduler_lock.h',
           'task_scheduler/scheduler_lock_impl.cc',
           'task_scheduler/scheduler_lock_impl.h',
+          'task_scheduler/scheduler_service_thread.cc',
+          'task_scheduler/scheduler_service_thread.h',
+          'task_scheduler/scheduler_thread_pool.h',
+          'task_scheduler/scheduler_thread_pool_impl.cc',
+          'task_scheduler/scheduler_thread_pool_impl.h',
+          'task_scheduler/scheduler_worker_thread.cc',
+          'task_scheduler/scheduler_worker_thread.h',
+          'task_scheduler/scheduler_worker_thread_stack.cc',
+          'task_scheduler/scheduler_worker_thread_stack.h',
           'task_scheduler/sequence.cc',
           'task_scheduler/sequence.h',
           'task_scheduler/sequence_sort_key.cc',
           'task_scheduler/sequence_sort_key.h',
           'task_scheduler/task.cc',
           'task_scheduler/task.h',
+          'task_scheduler/task_scheduler.cc',
+          'task_scheduler/task_scheduler.h',
+          'task_scheduler/task_scheduler_impl.cc',
+          'task_scheduler/task_scheduler_impl.h',
+          'task_scheduler/task_tracker.cc',
+          'task_scheduler/task_tracker.h',
           'task_scheduler/task_traits.cc',
           'task_scheduler/task_traits.h',
           'template_util.h',
@@ -657,8 +677,6 @@
           'third_party/nspr/prtime.h',
           'third_party/superfasthash/superfasthash.c',
           'third_party/xdg_mime/xdgmime.h',
-          'thread_task_runner_handle.cc',
-          'thread_task_runner_handle.h',
           'threading/non_thread_safe.h',
           'threading/non_thread_safe_impl.cc',
           'threading/non_thread_safe_impl.h',
@@ -697,6 +715,8 @@
           'threading/thread_local_win.cc',
           'threading/thread_restrictions.cc',
           'threading/thread_restrictions.h',
+          'threading/thread_task_runner_handle.cc',
+          'threading/thread_task_runner_handle.h',
           'threading/watchdog.cc',
           'threading/watchdog.h',
           'threading/worker_pool.cc',
@@ -782,6 +802,8 @@
           'win/shortcut.h',
           'win/startup_information.cc',
           'win/startup_information.h',
+          'win/wait_chain.cc',
+          'win/wait_chain.h',
           'win/win_util.cc',
           'win/win_util.h',
           'win/windows_version.cc',
diff --git a/base/base.isolate b/base/base.isolate
index e2d8bea..079d07d 100644
--- a/base/base.isolate
+++ b/base/base.isolate
@@ -27,14 +27,6 @@
         ],
       },
     }],
-    ['OS=="win"', {
-      # Required for base/stack_trace_win.cc to symbolize correctly.
-      'variables': {
-        'files': [
-          '<(PRODUCT_DIR)/dbghelp.dll',
-        ],
-      },
-    }],
     ['OS=="win" and asan==1 and component=="shared_library"', {
       'variables': {
         'files': [
diff --git a/base/base_nacl.gyp b/base/base_nacl.gyp
index 2713565..30763d4 100644
--- a/base/base_nacl.gyp
+++ b/base/base_nacl.gyp
@@ -40,6 +40,7 @@
             ],
           },
           'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
             'base.gyp:base_debugging_flags',
             'base.gyp:base_build_date',
           ],
@@ -64,6 +65,7 @@
             ],
           },
           'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
             'base.gyp:base_build_date',
             '../third_party/icu/icu_nacl.gyp:icudata_nacl',
             '../third_party/icu/icu_nacl.gyp:icui18n_nacl',
@@ -119,6 +121,7 @@
             'rand_util_nacl.cc',
           ],
           'dependencies': [
+            'allocator/allocator.gyp:allocator_features#target',
             'base.gyp:base_debugging_flags',
             'base.gyp:base_build_date',
             'third_party/libevent/libevent_nacl_nonsfi.gyp:event_nacl_nonsfi',
diff --git a/base/base_switches.cc b/base/base_switches.cc
index d1a38e4..fa0bc33 100644
--- a/base/base_switches.cc
+++ b/base/base_switches.cc
@@ -20,6 +20,10 @@
 // the memory-infra category is enabled.
 const char kEnableHeapProfiling[]           = "enable-heap-profiling";
 
+// Report native (walk the stack) allocation traces. By default, pseudo stacks
+// derived from trace events are reported.
+const char kEnableHeapProfilingModeNative[] = "native";
+
 // Generates full memory crash dump.
 const char kFullMemoryCrashReport[]         = "full-memory-crash-report";
 
diff --git a/base/base_switches.h b/base/base_switches.h
index 300c5f7..b80077b 100644
--- a/base/base_switches.h
+++ b/base/base_switches.h
@@ -15,6 +15,7 @@
 extern const char kDisableLowEndDeviceMode[];
 extern const char kEnableCrashReporter[];
 extern const char kEnableHeapProfiling[];
+extern const char kEnableHeapProfilingModeNative[];
 extern const char kEnableLowEndDeviceMode[];
 extern const char kForceFieldTrials[];
 extern const char kFullMemoryCrashReport[];
diff --git a/base/bind_helpers.h b/base/bind_helpers.h
index 117fc68..590d788 100644
--- a/base/bind_helpers.h
+++ b/base/bind_helpers.h
@@ -28,6 +28,9 @@
 // argument will CHECK() because the first invocation would have already
 // transferred ownership to the target function.
 //
+// RetainedRef() accepts a ref counted object and retains a reference to it.
+// When the callback is called, the object is passed as a raw pointer.
+//
 // ConstRef() allows binding a constant reference to an argument rather
 // than a copy.
 //
@@ -71,6 +74,19 @@
 // Without Owned(), someone would have to know to delete |pn| when the last
 // reference to the Callback is deleted.
 //
+// EXAMPLE OF RetainedRef():
+//
+//    void foo(RefCountedBytes* bytes) {}
+//
+//    scoped_refptr<RefCountedBytes> bytes = ...;
+//    Closure callback = Bind(&foo, base::RetainedRef(bytes));
+//    callback.Run();
+//
+// Without RetainedRef, the scoped_refptr would try to implicitly convert to
+// a raw pointer and fail compilation:
+//
+//    Closure callback = Bind(&foo, bytes); // ERROR!
+//
 //
 // EXAMPLE OF ConstRef():
 //
@@ -105,10 +121,11 @@
 //
 // EXAMPLE OF Passed():
 //
-//   void TakesOwnership(scoped_ptr<Foo> arg) { }
-//   scoped_ptr<Foo> CreateFoo() { return scoped_ptr<Foo>(new Foo()); }
+//   void TakesOwnership(std::unique_ptr<Foo> arg) { }
+//   std::unique_ptr<Foo> CreateFoo() { return std::unique_ptr<Foo>(new Foo());
+//   }
 //
-//   scoped_ptr<Foo> f(new Foo());
+//   std::unique_ptr<Foo> f(new Foo());
 //
 //   // |cb| is given ownership of Foo(). |f| is now NULL.
 //   // You can use std::move(f) in place of &f, but it's more verbose.
@@ -312,6 +329,16 @@
 };
 
 template <typename T>
+class RetainedRefWrapper {
+ public:
+  explicit RetainedRefWrapper(T* o) : ptr_(o) {}
+  explicit RetainedRefWrapper(scoped_refptr<T> o) : ptr_(std::move(o)) {}
+  T* get() const { return ptr_.get(); }
+ private:
+  scoped_refptr<T> ptr_;
+};
+
+template <typename T>
 struct IgnoreResultHelper {
   explicit IgnoreResultHelper(T functor) : functor_(functor) {}
 
@@ -327,7 +354,7 @@
 
 // An alternate implementation is to avoid the destructive copy, and instead
 // specialize ParamTraits<> for OwnedWrapper<> to change the StorageType to
-// a class that is essentially a scoped_ptr<>.
+// a class that is essentially a std::unique_ptr<>.
 //
 // The current implementation has the benefit though of leaving ParamTraits<>
 // fully in callback_internal.h as well as avoiding type conversions during
@@ -405,7 +432,7 @@
 }
 
 template <typename T>
-T* Unwrap(const scoped_refptr<T>& o) {
+T* Unwrap(const RetainedRefWrapper<T>& o) {
   return o.get();
 }
 
@@ -545,6 +572,16 @@
 }
 
 template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(T* o) {
+  return internal::RetainedRefWrapper<T>(o);
+}
+
+template <typename T>
+static inline internal::RetainedRefWrapper<T> RetainedRef(scoped_refptr<T> o) {
+  return internal::RetainedRefWrapper<T>(std::move(o));
+}
+
+template <typename T>
 static inline internal::ConstRefWrapper<T> ConstRef(const T& o) {
   return internal::ConstRefWrapper<T>(o);
 }
diff --git a/base/bind_internal.h b/base/bind_internal.h
index 199467c..6e0a425 100644
--- a/base/bind_internal.h
+++ b/base/bind_internal.h
@@ -176,9 +176,14 @@
       : method_(method) {
   }
 
-  template <typename... RunArgs>
-  R Run(T* object, RunArgs&&... args) {
-    return (object->*method_)(std::forward<RunArgs>(args)...);
+  template <typename Receiver, typename... RunArgs>
+  R Run(Receiver&& receiver_ptr, RunArgs&&... args) {
+    // Clang skips CV qualifier check on a method pointer invocation when the
+    // receiver is a subclass. Store the receiver into an unqualified
+    // reference to T to ensure the CV check works.
+    // https://llvm.org/bugs/show_bug.cgi?id=27037
+    T& receiver = *receiver_ptr;
+    return (receiver.*method_)(std::forward<RunArgs>(args)...);
   }
 
  private:
@@ -196,9 +201,14 @@
       : method_(method) {
   }
 
-  template <typename... RunArgs>
-  R Run(const T* object, RunArgs&&... args) {
-    return (object->*method_)(std::forward<RunArgs>(args)...);
+  template <typename Receiver, typename... RunArgs>
+  R Run(Receiver&& receiver_ptr, RunArgs&&... args) {
+    // Clang skips CV qualifier check on a method pointer invocation when the
+    // receiver is a subclass. Store the receiver into a const reference to T
+    // to ensure the CV check works.
+    // https://llvm.org/bugs/show_bug.cgi?id=27037
+    const T& receiver = *receiver_ptr;
+    return (receiver.*method_)(std::forward<RunArgs>(args)...);
   }
 
  private:
diff --git a/base/bind_internal_win.h b/base/bind_internal_win.h
index 2ee12ef..2def8743 100644
--- a/base/bind_internal_win.h
+++ b/base/bind_internal_win.h
@@ -8,6 +8,8 @@
 #ifndef BASE_BIND_INTERNAL_WIN_H_
 #define BASE_BIND_INTERNAL_WIN_H_
 
+#include <utility>
+
 #include "build/build_config.h"
 
 // In the x64 architecture in Windows, __fastcall, __stdcall, etc, are all
@@ -33,8 +35,9 @@
       : function_(function) {
   }
 
-  R Run(typename CallbackParamTraits<Args>::ForwardType... args) {
-    return function_(args...);
+  template <typename... RunArgs>
+  R Run(RunArgs&&... args) {
+    return function_(std::forward<RunArgs>(args)...);
   }
 
  private:
@@ -53,8 +56,9 @@
       : function_(function) {
   }
 
-  R Run(typename CallbackParamTraits<Args>::ForwardType... args) {
-    return function_(args...);
+  template <typename... RunArgs>
+  R Run(RunArgs&&... args) {
+    return function_(std::forward<RunArgs>(args)...);
   }
 
  private:
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index 405dde8..de18865 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -10,8 +10,8 @@
 
 #include "base/callback.h"
 #include "base/macros.h"
+#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/weak_ptr.h"
 #include "build/build_config.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -768,15 +768,10 @@
 }
 
 TEST_F(BindTest, ScopedRefptr) {
-  // BUG: The scoped_refptr should cause the only AddRef()/Release() pair. But
-  // due to a bug in base::Bind(), there's an extra call when invoking the
-  // callback.
-  // https://code.google.com/p/chromium/issues/detail?id=251937
-  EXPECT_CALL(has_ref_, AddRef()).Times(2);
-  EXPECT_CALL(has_ref_, Release()).Times(2);
+  EXPECT_CALL(has_ref_, AddRef()).Times(1);
+  EXPECT_CALL(has_ref_, Release()).Times(1);
 
-  const scoped_refptr<StrictMock<HasRef> > refptr(&has_ref_);
-
+  const scoped_refptr<HasRef> refptr(&has_ref_);
   Callback<int()> scoped_refptr_const_ref_cb =
       Bind(&FunctionWithScopedRefptrFirstParam, base::ConstRef(refptr), 1);
   EXPECT_EQ(1, scoped_refptr_const_ref_cb.Run());
@@ -807,6 +802,12 @@
   EXPECT_EQ(1, deletes);
 }
 
+TEST_F(BindTest, UniquePtrReceiver) {
+  std::unique_ptr<StrictMock<NoRef>> no_ref(new StrictMock<NoRef>);
+  EXPECT_CALL(*no_ref, VoidMethod0()).Times(1);
+  Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
+}
+
 // Tests for Passed() wrapper support:
 //   - Passed() can be constructed from a pointer to scoper.
 //   - Passed() can be constructed from a scoper rvalue.
@@ -822,7 +823,7 @@
 };
 
 using MoveOnlyTypesToTest =
-    ::testing::Types<scoped_ptr<DeleteCounter>,
+    ::testing::Types<std::unique_ptr<DeleteCounter>,
                      std::unique_ptr<DeleteCounter>,
                      std::unique_ptr<DeleteCounter, CustomDeleter>>;
 TYPED_TEST_CASE(BindMoveOnlyTypeTest, MoveOnlyTypesToTest);
@@ -879,23 +880,23 @@
   EXPECT_EQ(1, deletes);
 }
 
-void VerifyVector(const std::vector<scoped_ptr<int>>& v) {
+void VerifyVector(const std::vector<std::unique_ptr<int>>& v) {
   ASSERT_EQ(1u, v.size());
   EXPECT_EQ(12345, *v[0]);
 }
 
-std::vector<scoped_ptr<int>> AcceptAndReturnMoveOnlyVector(
-    std::vector<scoped_ptr<int>> v) {
+std::vector<std::unique_ptr<int>> AcceptAndReturnMoveOnlyVector(
+    std::vector<std::unique_ptr<int>> v) {
   VerifyVector(v);
   return v;
 }
 
 // Test that a vector containing move-only types can be used with Callback.
 TEST_F(BindTest, BindMoveOnlyVector) {
-  using MoveOnlyVector = std::vector<scoped_ptr<int>>;
+  using MoveOnlyVector = std::vector<std::unique_ptr<int>>;
 
   MoveOnlyVector v;
-  v.push_back(make_scoped_ptr(new int(12345)));
+  v.push_back(WrapUnique(new int(12345)));
 
   // Early binding should work:
   base::Callback<MoveOnlyVector()> bound_cb =
diff --git a/base/build_time_unittest.cc b/base/build_time_unittest.cc
index aac64a7..64886b4 100644
--- a/base/build_time_unittest.cc
+++ b/base/build_time_unittest.cc
@@ -3,36 +3,43 @@
 // found in the LICENSE file.
 
 #include "base/build_time.h"
+#if !defined(DONT_EMBED_BUILD_METADATA)
+#include "base/generated_build_date.h"
+#endif
+#include "base/time/time.h"
 
 #include "testing/gtest/include/gtest/gtest.h"
 
 TEST(BuildTime, DateLooksValid) {
 #if !defined(DONT_EMBED_BUILD_METADATA)
-  char build_date[] = __DATE__;
+  char build_date[] = BUILD_DATE;
 #else
-  char build_date[] = "Sep 02 2008";
+  char build_date[] = "Sep 02 2008 05:00:00";
 #endif
 
-  EXPECT_EQ(11u, strlen(build_date));
+  EXPECT_EQ(20u, strlen(build_date));
   EXPECT_EQ(' ', build_date[3]);
   EXPECT_EQ(' ', build_date[6]);
+  EXPECT_EQ(' ', build_date[11]);
+  EXPECT_EQ('0', build_date[12]);
+  EXPECT_EQ('5', build_date[13]);
+  EXPECT_EQ(':', build_date[14]);
+  EXPECT_EQ('0', build_date[15]);
+  EXPECT_EQ('0', build_date[16]);
+  EXPECT_EQ(':', build_date[17]);
+  EXPECT_EQ('0', build_date[18]);
+  EXPECT_EQ('0', build_date[19]);
 }
 
-TEST(BuildTime, TimeLooksValid) {
-#if defined(DONT_EMBED_BUILD_METADATA)
-  char build_time[] = "08:00:00";
-#else
-  char build_time[] = __TIME__;
+TEST(BuildTime, InThePast) {
+  EXPECT_LT(base::GetBuildTime(), base::Time::Now());
+  EXPECT_LT(base::GetBuildTime(), base::Time::NowFromSystemTime());
+}
+
+#if !defined(DONT_EMBED_BUILD_METADATA)
+TEST(BuildTime, NotTooFar) {
+  // BuildTime must be less than 45 days old.
+  base::Time cutoff(base::Time::Now() - base::TimeDelta::FromDays(45));
+  EXPECT_GT(base::GetBuildTime(), cutoff);
+}
 #endif
-
-  EXPECT_EQ(8u, strlen(build_time));
-  EXPECT_EQ(':', build_time[2]);
-  EXPECT_EQ(':', build_time[5]);
-}
-
-TEST(BuildTime, DoesntCrash) {
-  // Since __DATE__ isn't updated unless one does a clobber build, we can't
-  // really test the value returned by it, except to check that it doesn't
-  // crash.
-  base::GetBuildTime();
-}
diff --git a/base/callback.h b/base/callback.h
index c04e90d..abb907b 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -187,8 +187,8 @@
 //
 // PASSING PARAMETERS AS A scoped_ptr
 //
-//   void TakesOwnership(scoped_ptr<Foo> arg) {}
-//   scoped_ptr<Foo> f(new Foo);
+//   void TakesOwnership(std::unique_ptr<Foo> arg) {}
+//   std::unique_ptr<Foo> f(new Foo);
 //   // f becomes null during the following call.
 //   base::Closure cb = base::Bind(&TakesOwnership, base::Passed(&f));
 //
diff --git a/base/callback_internal.h b/base/callback_internal.h
index 439ce6d..3e8e10f 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -8,18 +8,11 @@
 #ifndef BASE_CALLBACK_INTERNAL_H_
 #define BASE_CALLBACK_INTERNAL_H_
 
-#include <stddef.h>
-#include <map>
-#include <memory>
-#include <type_traits>
-#include <vector>
-
 #include "base/atomic_ref_count.h"
 #include "base/base_export.h"
 #include "base/callback_forward.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 
 namespace base {
 namespace internal {
@@ -118,129 +111,6 @@
 extern template class CallbackBase<CopyMode::MoveOnly>;
 extern template class CallbackBase<CopyMode::Copyable>;
 
-// A helper template to determine if given type is non-const move-only-type,
-// i.e. if a value of the given type should be passed via std::move() in a
-// destructive way. Types are considered to be move-only if they have a
-// sentinel MoveOnlyTypeForCPP03 member: a class typically gets this from using
-// the DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND macro.
-// It would be easy to generalize this trait to all move-only types... but this
-// confuses template deduction in VS2013 with certain types such as
-// std::unique_ptr.
-// TODO(dcheng): Revisit this when Windows switches to VS2015 by default.
-
-template <typename T> struct IsMoveOnlyType {
-  // Types YesType and NoType are guaranteed such that sizeof(YesType) <
-  // sizeof(NoType).
-  using YesType = char;
-  struct NoType { YesType dummy[2]; };
-
-  template <typename U>
-  static YesType Test(const typename U::MoveOnlyTypeForCPP03*);
-
-  template <typename U>
-  static NoType Test(...);
-
-  static const bool value = sizeof((Test<T>(0))) == sizeof(YesType) &&
-                            !std::is_const<T>::value;
-};
-
-// Specialization of IsMoveOnlyType so that std::unique_ptr is still considered
-// move-only, even without the sentinel member.
-template <typename T, typename D>
-struct IsMoveOnlyType<std::unique_ptr<T, D>> : std::true_type {};
-
-// Specialization of std::vector, so that it's considered move-only if the
-// element type is move-only. Allocator is explicitly ignored when determining
-// move-only status of the std::vector.
-template <typename T, typename Allocator>
-struct IsMoveOnlyType<std::vector<T, Allocator>> : IsMoveOnlyType<T> {};
-
-template <typename>
-struct CallbackParamTraitsForMoveOnlyType;
-
-template <typename>
-struct CallbackParamTraitsForNonMoveOnlyType;
-
-// TODO(tzik): Use a default parameter once MSVS supports variadic templates
-// with default values.
-// http://connect.microsoft.com/VisualStudio/feedbackdetail/view/957801/compilation-error-with-variadic-templates
-//
-// This is a typetraits object that's used to take an argument type, and
-// extract a suitable type for forwarding arguments.
-template <typename T>
-struct CallbackParamTraits
-    : std::conditional<IsMoveOnlyType<T>::value,
-         CallbackParamTraitsForMoveOnlyType<T>,
-         CallbackParamTraitsForNonMoveOnlyType<T>>::type {
-};
-
-template <typename T>
-struct CallbackParamTraitsForNonMoveOnlyType {
-  using ForwardType = const T&;
-};
-
-// Note that for array types, we implicitly add a const in the conversion. This
-// means that it is not possible to bind array arguments to functions that take
-// a non-const pointer. Trying to specialize the template based on a "const
-// T[n]" does not seem to match correctly, so we are stuck with this
-// restriction.
-template <typename T, size_t n>
-struct CallbackParamTraitsForNonMoveOnlyType<T[n]> {
-  using ForwardType = const T*;
-};
-
-// See comment for CallbackParamTraits<T[n]>.
-template <typename T>
-struct CallbackParamTraitsForNonMoveOnlyType<T[]> {
-  using ForwardType = const T*;
-};
-
-// Parameter traits for movable-but-not-copyable scopers.
-//
-// Callback<>/Bind() understands movable-but-not-copyable semantics where
-// the type cannot be copied but can still have its state destructively
-// transferred (aka. moved) to another instance of the same type by calling a
-// helper function.  When used with Bind(), this signifies transferal of the
-// object's state to the target function.
-//
-// For these types, the ForwardType must not be a const reference, or a
-// reference.  A const reference is inappropriate, and would break const
-// correctness, because we are implementing a destructive move.  A non-const
-// reference cannot be used with temporaries which means the result of a
-// function or a cast would not be usable with Callback<> or Bind().
-template <typename T>
-struct CallbackParamTraitsForMoveOnlyType {
-  using ForwardType = T;
-};
-
-// CallbackForward() is a very limited simulation of C++11's std::forward()
-// used by the Callback/Bind system for a set of movable-but-not-copyable
-// types.  It is needed because forwarding a movable-but-not-copyable
-// argument to another function requires us to invoke the proper move
-// operator to create a rvalue version of the type.  The supported types are
-// whitelisted below as overloads of the CallbackForward() function. The
-// default template compiles out to be a no-op.
-//
-// In C++11, std::forward would replace all uses of this function.  However, it
-// is impossible to implement a general std::forward without C++11 due to a lack
-// of rvalue references.
-//
-// In addition to Callback/Bind, this is used by PostTaskAndReplyWithResult to
-// simulate std::forward() and forward the result of one Callback as a
-// parameter to another callback. This is to support Callbacks that return
-// the movable-but-not-copyable types whitelisted above.
-template <typename T>
-typename std::enable_if<!IsMoveOnlyType<T>::value, T>::type& CallbackForward(
-    T& t) {
-  return t;
-}
-
-template <typename T>
-typename std::enable_if<IsMoveOnlyType<T>::value, T>::type CallbackForward(
-    T& t) {
-  return std::move(t);
-}
-
 }  // namespace internal
 }  // namespace base
 
diff --git a/base/callback_list.h b/base/callback_list.h
index 7d6a478..7ab79dd 100644
--- a/base/callback_list.h
+++ b/base/callback_list.h
@@ -6,13 +6,12 @@
 #define BASE_CALLBACK_LIST_H_
 
 #include <list>
+#include <memory>
 
 #include "base/callback.h"
-#include "base/callback_internal.h"
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 
 // OVERVIEW:
 //
@@ -29,7 +28,7 @@
 //
 //   typedef base::Callback<void(const Foo&)> OnFooCallback;
 //
-//   scoped_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+//   std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
 //   RegisterCallback(const OnFooCallback& cb) {
 //     return callback_list_.Add(cb);
 //   }
@@ -62,7 +61,7 @@
 //     // Do something.
 //   }
 //
-//   scoped_ptr<base::CallbackList<void(const Foo&)>::Subscription>
+//   std::unique_ptr<base::CallbackList<void(const Foo&)>::Subscription>
 //       foo_subscription_;
 //
 //   DISALLOW_COPY_AND_ASSIGN(MyWidgetListener);
@@ -103,9 +102,9 @@
   // Add a callback to the list. The callback will remain registered until the
   // returned Subscription is destroyed, which must occur before the
   // CallbackList is destroyed.
-  scoped_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT {
+  std::unique_ptr<Subscription> Add(const CallbackType& cb) WARN_UNUSED_RESULT {
     DCHECK(!cb.is_null());
-    return scoped_ptr<Subscription>(
+    return std::unique_ptr<Subscription>(
         new Subscription(this, callbacks_.insert(callbacks_.end(), cb)));
   }
 
@@ -211,8 +210,8 @@
 
   CallbackList() {}
 
-  void Notify(
-      typename internal::CallbackParamTraits<Args>::ForwardType... args) {
+  template <typename... RunArgs>
+  void Notify(RunArgs&&... args) {
     typename internal::CallbackListBase<CallbackType>::Iterator it =
         this->GetIterator();
     CallbackType* cb;
diff --git a/base/callback_list_unittest.cc b/base/callback_list_unittest.cc
index 010efc5..62081e9 100644
--- a/base/callback_list_unittest.cc
+++ b/base/callback_list_unittest.cc
@@ -4,12 +4,12 @@
 
 #include "base/callback_list.h"
 
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -38,7 +38,7 @@
     removal_subscription_.reset();
   }
   void SetSubscriptionToRemove(
-      scoped_ptr<CallbackList<void(void)>::Subscription> sub) {
+      std::unique_ptr<CallbackList<void(void)>::Subscription> sub) {
     removal_subscription_ = std::move(sub);
   }
 
@@ -46,7 +46,7 @@
 
  private:
   int total_;
-  scoped_ptr<CallbackList<void(void)>::Subscription> removal_subscription_;
+  std::unique_ptr<CallbackList<void(void)>::Subscription> removal_subscription_;
   DISALLOW_COPY_AND_ASSIGN(Remover);
 };
 
@@ -74,7 +74,7 @@
   bool added_;
   int total_;
   CallbackList<void(void)>* cb_reg_;
-  scoped_ptr<CallbackList<void(void)>::Subscription> subscription_;
+  std::unique_ptr<CallbackList<void(void)>::Subscription> subscription_;
   DISALLOW_COPY_AND_ASSIGN(Adder);
 };
 
@@ -118,42 +118,43 @@
   Summer s;
 
   CallbackList<void(int)> c1;
-  scoped_ptr<CallbackList<void(int)>::Subscription> subscription1 =
+  std::unique_ptr<CallbackList<void(int)>::Subscription> subscription1 =
       c1.Add(Bind(&Summer::AddOneParam, Unretained(&s)));
 
   c1.Notify(1);
   EXPECT_EQ(1, s.value());
 
   CallbackList<void(int, int)> c2;
-  scoped_ptr<CallbackList<void(int, int)>::Subscription> subscription2 =
+  std::unique_ptr<CallbackList<void(int, int)>::Subscription> subscription2 =
       c2.Add(Bind(&Summer::AddTwoParam, Unretained(&s)));
 
   c2.Notify(1, 2);
   EXPECT_EQ(3, s.value());
 
   CallbackList<void(int, int, int)> c3;
-  scoped_ptr<CallbackList<void(int, int, int)>::Subscription>
+  std::unique_ptr<CallbackList<void(int, int, int)>::Subscription>
       subscription3 = c3.Add(Bind(&Summer::AddThreeParam, Unretained(&s)));
 
   c3.Notify(1, 2, 3);
   EXPECT_EQ(6, s.value());
 
   CallbackList<void(int, int, int, int)> c4;
-  scoped_ptr<CallbackList<void(int, int, int, int)>::Subscription>
+  std::unique_ptr<CallbackList<void(int, int, int, int)>::Subscription>
       subscription4 = c4.Add(Bind(&Summer::AddFourParam, Unretained(&s)));
 
   c4.Notify(1, 2, 3, 4);
   EXPECT_EQ(10, s.value());
 
   CallbackList<void(int, int, int, int, int)> c5;
-  scoped_ptr<CallbackList<void(int, int, int, int, int)>::Subscription>
+  std::unique_ptr<CallbackList<void(int, int, int, int, int)>::Subscription>
       subscription5 = c5.Add(Bind(&Summer::AddFiveParam, Unretained(&s)));
 
   c5.Notify(1, 2, 3, 4, 5);
   EXPECT_EQ(15, s.value());
 
   CallbackList<void(int, int, int, int, int, int)> c6;
-  scoped_ptr<CallbackList<void(int, int, int, int, int, int)>::Subscription>
+  std::unique_ptr<
+      CallbackList<void(int, int, int, int, int, int)>::Subscription>
       subscription6 = c6.Add(Bind(&Summer::AddSixParam, Unretained(&s)));
 
   c6.Notify(1, 2, 3, 4, 5, 6);
@@ -166,9 +167,9 @@
   CallbackList<void(void)> cb_reg;
   Listener a, b, c;
 
-  scoped_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&a)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
 
   EXPECT_TRUE(a_subscription.get());
@@ -181,7 +182,7 @@
 
   b_subscription.reset();
 
-  scoped_ptr<CallbackList<void(void)>::Subscription> c_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> c_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&c)));
 
   cb_reg.Notify();
@@ -201,9 +202,9 @@
   CallbackList<void(int)> cb_reg;
   Listener a(1), b(-1), c(1);
 
-  scoped_ptr<CallbackList<void(int)>::Subscription> a_subscription =
+  std::unique_ptr<CallbackList<void(int)>::Subscription> a_subscription =
       cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&a)));
-  scoped_ptr<CallbackList<void(int)>::Subscription> b_subscription =
+  std::unique_ptr<CallbackList<void(int)>::Subscription> b_subscription =
       cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&b)));
 
   EXPECT_TRUE(a_subscription.get());
@@ -216,7 +217,7 @@
 
   b_subscription.reset();
 
-  scoped_ptr<CallbackList<void(int)>::Subscription> c_subscription =
+  std::unique_ptr<CallbackList<void(int)>::Subscription> c_subscription =
       cb_reg.Add(Bind(&Listener::IncrementByMultipleOfScaler, Unretained(&c)));
 
   cb_reg.Notify(10);
@@ -237,15 +238,15 @@
   Listener a, b;
   Remover remover_1, remover_2;
 
-  scoped_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
-      cb_reg.Add(Bind(&Remover::IncrementTotalAndRemove,
-          Unretained(&remover_1)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
-      cb_reg.Add(Bind(&Remover::IncrementTotalAndRemove,
-          Unretained(&remover_2)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_1)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_2)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&a)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
 
   // |remover_1| will remove itself.
@@ -278,9 +279,9 @@
   CallbackList<void(void)> cb_reg;
   Adder a(&cb_reg);
   Listener b;
-  scoped_ptr<CallbackList<void(void)>::Subscription> a_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> a_subscription =
       cb_reg.Add(Bind(&Adder::AddCallback, Unretained(&a)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> b_subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> b_subscription =
       cb_reg.Add(Bind(&Listener::IncrementTotal, Unretained(&b)));
 
   cb_reg.Notify();
@@ -308,7 +309,7 @@
   cb_reg.set_removal_callback(
       Bind(&Counter::Increment, Unretained(&remove_count)));
 
-  scoped_ptr<CallbackList<void(void)>::Subscription> subscription =
+  std::unique_ptr<CallbackList<void(void)>::Subscription> subscription =
       cb_reg.Add(Bind(&DoNothing));
 
   // Removing a subscription outside of iteration signals the callback.
@@ -318,12 +319,12 @@
 
   // Configure two subscriptions to remove themselves.
   Remover remover_1, remover_2;
-  scoped_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
-      cb_reg.Add(Bind(&Remover::IncrementTotalAndRemove,
-          Unretained(&remover_1)));
-  scoped_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
-      cb_reg.Add(Bind(&Remover::IncrementTotalAndRemove,
-          Unretained(&remover_2)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_1_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_1)));
+  std::unique_ptr<CallbackList<void(void)>::Subscription> remover_2_sub =
+      cb_reg.Add(
+          Bind(&Remover::IncrementTotalAndRemove, Unretained(&remover_2)));
   remover_1.SetSubscriptionToRemove(std::move(remover_1_sub));
   remover_2.SetSubscriptionToRemove(std::move(remover_2_sub));
 
diff --git a/base/callback_unittest.cc b/base/callback_unittest.cc
index 1f492d4..176ea06 100644
--- a/base/callback_unittest.cc
+++ b/base/callback_unittest.cc
@@ -2,12 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/bind.h"
 #include "base/callback.h"
+
+#include <memory>
+
+#include "base/bind.h"
 #include "base/callback_helpers.h"
 #include "base/callback_internal.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
diff --git a/base/cancelable_callback.h b/base/cancelable_callback.h
index 47dfb2d..0034fdd 100644
--- a/base/cancelable_callback.h
+++ b/base/cancelable_callback.h
@@ -42,6 +42,8 @@
 #ifndef BASE_CANCELABLE_CALLBACK_H_
 #define BASE_CANCELABLE_CALLBACK_H_
 
+#include <utility>
+
 #include "base/base_export.h"
 #include "base/bind.h"
 #include "base/callback.h"
@@ -103,7 +105,7 @@
 
  private:
   void Forward(A... args) const {
-    callback_.Run(args...);
+    callback_.Run(std::forward<A>(args)...);
   }
 
   // Helper method to bind |forwarder_| using a weak pointer from
diff --git a/base/cancelable_callback_unittest.cc b/base/cancelable_callback_unittest.cc
index 6d0a114..23b6c1c 100644
--- a/base/cancelable_callback_unittest.cc
+++ b/base/cancelable_callback_unittest.cc
@@ -4,13 +4,16 @@
 
 #include "base/cancelable_callback.h"
 
+#include <memory>
+
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/location.h"
+#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted.h"
 #include "base/run_loop.h"
 #include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -26,6 +29,10 @@
 void IncrementBy(int* count, int n) { (*count) += n; }
 void RefCountedParam(const scoped_refptr<TestRefCounted>& ref_counted) {}
 
+void OnMoveOnlyReceived(int* value, std::unique_ptr<int> result) {
+  *value = *result;
+}
+
 // Cancel().
 //  - Callback can be run multiple times.
 //  - After Cancel(), Run() completes but has no effect.
@@ -182,5 +189,17 @@
   EXPECT_EQ(1, count);
 }
 
+// CancelableCallback can be used with move-only types.
+TEST(CancelableCallbackTest, MoveOnlyType) {
+  const int kExpectedResult = 42;
+
+  int result = 0;
+  CancelableCallback<void(std::unique_ptr<int>)> cb(
+      base::Bind(&OnMoveOnlyReceived, base::Unretained(&result)));
+  cb.callback().Run(base::WrapUnique(new int(kExpectedResult)));
+
+  EXPECT_EQ(kExpectedResult, result);
+}
+
 }  // namespace
 }  // namespace base
diff --git a/base/command_line.cc b/base/command_line.cc
index 40b65b9..8b45c36 100644
--- a/base/command_line.cc
+++ b/base/command_line.cc
@@ -149,10 +149,7 @@
 
 }  // namespace
 
-CommandLine::CommandLine(NoProgram /* no_program */)
-    : argv_(1),
-      begin_args_(1) {
-}
+CommandLine::CommandLine(NoProgram) : argv_(1), begin_args_(1) {}
 
 CommandLine::CommandLine(const FilePath& program)
     : argv_(1),
@@ -443,7 +440,10 @@
 }
 
 CommandLine::StringType CommandLine::GetArgumentsStringInternal(
-    bool /* quote_placeholders */) const {
+    bool quote_placeholders) const {
+#if !defined(OS_WIN)
+  (void)quote_placeholders;  // Avoid an unused warning.
+#endif
   StringType params;
   // Append switches and arguments.
   bool parse_switches = true;
diff --git a/base/command_line_unittest.cc b/base/command_line_unittest.cc
index 967ce1c..bcfc6c5 100644
--- a/base/command_line_unittest.cc
+++ b/base/command_line_unittest.cc
@@ -2,13 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/command_line.h"
+
+#include <memory>
 #include <string>
 #include <vector>
 
-#include "base/command_line.h"
 #include "base/files/file_path.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/utf_string_conversions.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -390,7 +391,8 @@
 
 // Test that copies of CommandLine have a valid StringPiece map.
 TEST(CommandLineTest, Copy) {
-  scoped_ptr<CommandLine> initial(new CommandLine(CommandLine::NO_PROGRAM));
+  std::unique_ptr<CommandLine> initial(
+      new CommandLine(CommandLine::NO_PROGRAM));
   initial->AppendSwitch("a");
   initial->AppendSwitch("bbbbbbbbbbbbbbb");
   initial->AppendSwitch("c");
diff --git a/base/compiler_specific.h b/base/compiler_specific.h
index 4067d61..c2a02de 100644
--- a/base/compiler_specific.h
+++ b/base/compiler_specific.h
@@ -192,4 +192,12 @@
 #endif  // defined(COMPILER_GCC)
 #endif  // !defined(UNLIKELY)
 
+// Compiler feature-detection.
+// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
+#if defined(__has_feature)
+#define HAS_FEATURE(FEATURE) __has_feature(FEATURE)
+#else
+#define HAS_FEATURE(FEATURE) 0
+#endif
+
 #endif  // BASE_COMPILER_SPECIFIC_H_
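A short, illustrative sketch of how the new HAS_FEATURE() macro is consumed; address_sanitizer is just an example feature name understood by Clang:

  #include "base/compiler_specific.h"

  // HAS_FEATURE() expands to __has_feature() under Clang and to 0 elsewhere,
  // so it can be used unconditionally in preprocessor checks.
  #if HAS_FEATURE(address_sanitizer)
  #define EXAMPLE_USING_ASAN 1
  #else
  #define EXAMPLE_USING_ASAN 0
  #endif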
diff --git a/base/containers/scoped_ptr_hash_map.h b/base/containers/scoped_ptr_hash_map.h
index dd100c6..f513f06 100644
--- a/base/containers/scoped_ptr_hash_map.h
+++ b/base/containers/scoped_ptr_hash_map.h
@@ -8,19 +8,19 @@
 #include <stddef.h>
 
 #include <algorithm>
+#include <memory>
 #include <utility>
 
 #include "base/containers/hash_tables.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/stl_util.h"
 
 namespace base {
 
 // Deprecated. Use std::unordered_map instead. https://crbug.com/579229
 //
-// This type acts like a hash_map<K, scoped_ptr<V, D> >, based on top of
+// This type acts like a hash_map<K, std::unique_ptr<V, D> >, based on top of
 // base::hash_map. The ScopedPtrHashMap has ownership of all values in the data
 // structure.
 template <typename Key, typename ScopedPtr>
diff --git a/base/cpu.cc b/base/cpu.cc
index 7135445..de4a001 100644
--- a/base/cpu.cc
+++ b/base/cpu.cc
@@ -7,13 +7,11 @@
 #include <limits.h>
 #include <stddef.h>
 #include <stdint.h>
-#include <stdlib.h>
 #include <string.h>
 
 #include <algorithm>
 
 #include "base/macros.h"
-#include "base/strings/string_piece.h"
 #include "build/build_config.h"
 
 #if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
@@ -49,7 +47,6 @@
     has_avx2_(false),
     has_aesni_(false),
     has_non_stop_time_stamp_counter_(false),
-    has_broken_neon_(false),
     cpu_vendor_("unknown") {
   Initialize();
 }
@@ -99,7 +96,7 @@
 #if defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
 class LazyCpuInfoValue {
  public:
-  LazyCpuInfoValue() : has_broken_neon_(false) {
+  LazyCpuInfoValue() {
     // This function finds the value from /proc/cpuinfo under the key "model
     // name" or "Processor". "model name" is used in Linux 3.8 and later (3.7
     // and later for arm64) and is shown once per CPU. "Processor" is used in
@@ -108,21 +105,6 @@
     const char kModelNamePrefix[] = "model name\t: ";
     const char kProcessorPrefix[] = "Processor\t: ";
 
-    // This function also calculates whether we believe that this CPU has a
-    // broken NEON unit based on these fields from cpuinfo:
-    unsigned implementer = 0, architecture = 0, variant = 0, part = 0,
-             revision = 0;
-    const struct {
-      const char key[17];
-      unsigned int* result;
-    } kUnsignedValues[] = {
-      {"CPU implementer", &implementer},
-      {"CPU architecture", &architecture},
-      {"CPU variant", &variant},
-      {"CPU part", &part},
-      {"CPU revision", &revision},
-    };
-
     std::string contents;
     ReadFileToString(FilePath("/proc/cpuinfo"), &contents);
     DCHECK(!contents.empty());
@@ -138,52 +120,13 @@
            line.compare(0, strlen(kProcessorPrefix), kProcessorPrefix) == 0)) {
         brand_.assign(line.substr(strlen(kModelNamePrefix)));
       }
-
-      for (size_t i = 0; i < arraysize(kUnsignedValues); i++) {
-        const char *key = kUnsignedValues[i].key;
-        const size_t len = strlen(key);
-
-        if (line.compare(0, len, key) == 0 &&
-            line.size() >= len + 1 &&
-            (line[len] == '\t' || line[len] == ' ' || line[len] == ':')) {
-          size_t colon_pos = line.find(':', len);
-          if (colon_pos == std::string::npos) {
-            continue;
-          }
-
-          const StringPiece line_sp(line);
-          StringPiece value_sp = line_sp.substr(colon_pos + 1);
-          while (!value_sp.empty() &&
-                 (value_sp[0] == ' ' || value_sp[0] == '\t')) {
-            value_sp = value_sp.substr(1);
-          }
-
-          // The string may have leading "0x" or not, so we use strtoul to
-          // handle that.
-          char* endptr;
-          std::string value(value_sp.as_string());
-          unsigned long int result = strtoul(value.c_str(), &endptr, 0);
-          if (*endptr == 0 && result <= UINT_MAX) {
-            *kUnsignedValues[i].result = result;
-          }
-        }
-      }
     }
-
-    has_broken_neon_ =
-      implementer == 0x51 &&
-      architecture == 7 &&
-      variant == 1 &&
-      part == 0x4d &&
-      revision == 0;
   }
 
   const std::string& brand() const { return brand_; }
-  bool has_broken_neon() const { return has_broken_neon_; }
 
  private:
   std::string brand_;
-  bool has_broken_neon_;
   DISALLOW_COPY_AND_ASSIGN(LazyCpuInfoValue);
 };
 
@@ -277,7 +220,6 @@
   }
 #elif defined(ARCH_CPU_ARM_FAMILY) && (defined(OS_ANDROID) || defined(OS_LINUX))
   cpu_brand_.assign(g_lazy_cpuinfo.Get().brand());
-  has_broken_neon_ = g_lazy_cpuinfo.Get().has_broken_neon();
 #endif
 }
 
diff --git a/base/cpu.h b/base/cpu.h
index 8c3c06c..0e4303b 100644
--- a/base/cpu.h
+++ b/base/cpu.h
@@ -52,10 +52,6 @@
   bool has_non_stop_time_stamp_counter() const {
     return has_non_stop_time_stamp_counter_;
   }
-  // has_broken_neon is only valid on ARM chips. If true, it indicates that we
-  // believe that the NEON unit on the current CPU is flawed and cannot execute
-  // some code. See https://code.google.com/p/chromium/issues/detail?id=341598
-  bool has_broken_neon() const { return has_broken_neon_; }
 
   IntelMicroArchitecture GetIntelMicroArchitecture() const;
   const std::string& cpu_brand() const { return cpu_brand_; }
@@ -82,7 +78,6 @@
   bool has_avx2_;
   bool has_aesni_;
   bool has_non_stop_time_stamp_counter_;
-  bool has_broken_neon_;
   std::string cpu_vendor_;
   std::string cpu_brand_;
 };
diff --git a/base/debug/alias.cc b/base/debug/alias.cc
index d498084..ff35574 100644
--- a/base/debug/alias.cc
+++ b/base/debug/alias.cc
@@ -12,8 +12,7 @@
 #pragma optimize("", off)
 #endif
 
-void Alias(const void* /* var */) {
-}
+void Alias(const void*) {}
 
 #if defined(COMPILER_MSVC)
 #pragma optimize("", on)
diff --git a/base/debug/debugger_posix.cc b/base/debug/debugger_posix.cc
index d7e492b..a157d9a 100644
--- a/base/debug/debugger_posix.cc
+++ b/base/debug/debugger_posix.cc
@@ -3,8 +3,6 @@
 // found in the LICENSE file.
 
 #include "base/debug/debugger.h"
-#include "base/macros.h"
-#include "build/build_config.h"
 
 #include <errno.h>
 #include <fcntl.h>
@@ -16,8 +14,12 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <memory>
 #include <vector>
 
+#include "base/macros.h"
+#include "build/build_config.h"
+
 #if defined(__GLIBCXX__)
 #include <cxxabi.h>
 #endif
@@ -38,7 +40,6 @@
 
 #include "base/debug/alias.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/string_piece.h"
 
diff --git a/base/debug/debugging_flags.h b/base/debug/debugging_flags.h
new file mode 100644
index 0000000..1ea435f
--- /dev/null
+++ b/base/debug/debugging_flags.h
@@ -0,0 +1,11 @@
+// Generated by build/write_buildflag_header.py
+// From "base_debugging_flags"
+
+#ifndef BASE_DEBUG_DEBUGGING_FLAGS_H_
+#define BASE_DEBUG_DEBUGGING_FLAGS_H_
+
+#include "build/buildflag.h"
+
+#define BUILDFLAG_INTERNAL_ENABLE_PROFILING() (0)
+
+#endif  // BASE_DEBUG_DEBUGGING_FLAGS_H_
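The generated header is read through the BUILDFLAG() macro from build/buildflag.h (included above); a minimal, illustrative sketch:

  #include "base/debug/debugging_flags.h"

  // BUILDFLAG(ENABLE_PROFILING) expands to the 0/1 value baked into the
  // generated header, so profiling-only code compiles out when it is 0.
  #if BUILDFLAG(ENABLE_PROFILING)
  // ... profiling-only code ...
  #endif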
diff --git a/base/debug/leak_annotations.h b/base/debug/leak_annotations.h
new file mode 100644
index 0000000..dc50246
--- /dev/null
+++ b/base/debug/leak_annotations.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_LEAK_ANNOTATIONS_H_
+#define BASE_DEBUG_LEAK_ANNOTATIONS_H_
+
+#include "base/macros.h"
+#include "build/build_config.h"
+
+// This file defines macros which can be used to annotate intentional memory
+// leaks. Support for annotations is implemented in LeakSanitizer. Annotated
+// objects will be treated as a source of live pointers, i.e. any heap objects
+// reachable by following pointers from an annotated object will not be
+// reported as leaks.
+//
+// ANNOTATE_SCOPED_MEMORY_LEAK: all allocations made in the current scope
+// will be annotated as leaks.
+// ANNOTATE_LEAKING_OBJECT_PTR(X): the heap object referenced by pointer X will
+// be annotated as a leak.
+
+#if defined(LEAK_SANITIZER) && !defined(OS_NACL)
+
+#include <sanitizer/lsan_interface.h>
+
+class ScopedLeakSanitizerDisabler {
+ public:
+  ScopedLeakSanitizerDisabler() { __lsan_disable(); }
+  ~ScopedLeakSanitizerDisabler() { __lsan_enable(); }
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedLeakSanitizerDisabler);
+};
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK \
+    ScopedLeakSanitizerDisabler leak_sanitizer_disabler; static_cast<void>(0)
+
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) __lsan_ignore_object(X);
+
+#else
+
+#define ANNOTATE_SCOPED_MEMORY_LEAK ((void)0)
+#define ANNOTATE_LEAKING_OBJECT_PTR(X) ((void)0)
+
+#endif
+
+#endif  // BASE_DEBUG_LEAK_ANNOTATIONS_H_
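A brief usage sketch (the leaked pointer is purely illustrative); both macros are no-ops unless the build defines LEAK_SANITIZER:

  #include "base/debug/leak_annotations.h"

  int* CreateIntentionallyLeakedCounter() {
    int* counter = new int(0);
    // Tell LeakSanitizer that this allocation is an intentional leak so it
    // is not reported.
    ANNOTATE_LEAKING_OBJECT_PTR(counter);
    return counter;
  }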
diff --git a/base/debug/leak_tracker_unittest.cc b/base/debug/leak_tracker_unittest.cc
index 99df4c1..8b4c568 100644
--- a/base/debug/leak_tracker_unittest.cc
+++ b/base/debug/leak_tracker_unittest.cc
@@ -3,7 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/debug/leak_tracker.h"
-#include "base/memory/scoped_ptr.h"
+
+#include <memory>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -29,9 +31,9 @@
   EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
 
   // Use scoped_ptr so compiler doesn't complain about unused variables.
-  scoped_ptr<ClassA> a1(new ClassA);
-  scoped_ptr<ClassB> b1(new ClassB);
-  scoped_ptr<ClassB> b2(new ClassB);
+  std::unique_ptr<ClassA> a1(new ClassA);
+  std::unique_ptr<ClassB> b1(new ClassB);
+  std::unique_ptr<ClassB> b2(new ClassB);
 
   EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
   EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
@@ -52,7 +54,7 @@
     EXPECT_EQ(1, LeakTracker<ClassA>::NumLiveInstances());
     EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
 
-    scoped_ptr<ClassA> a2(new ClassA);
+    std::unique_ptr<ClassA> a2(new ClassA);
 
     EXPECT_EQ(2, LeakTracker<ClassA>::NumLiveInstances());
     EXPECT_EQ(2, LeakTracker<ClassB>::NumLiveInstances());
@@ -72,10 +74,10 @@
 TEST(LeakTrackerTest, LinkedList) {
   EXPECT_EQ(0, LeakTracker<ClassB>::NumLiveInstances());
 
-  scoped_ptr<ClassA> a1(new ClassA);
-  scoped_ptr<ClassA> a2(new ClassA);
-  scoped_ptr<ClassA> a3(new ClassA);
-  scoped_ptr<ClassA> a4(new ClassA);
+  std::unique_ptr<ClassA> a1(new ClassA);
+  std::unique_ptr<ClassA> a2(new ClassA);
+  std::unique_ptr<ClassA> a3(new ClassA);
+  std::unique_ptr<ClassA> a4(new ClassA);
 
   EXPECT_EQ(4, LeakTracker<ClassA>::NumLiveInstances());
 
@@ -88,7 +90,7 @@
   EXPECT_EQ(2, LeakTracker<ClassA>::NumLiveInstances());
 
   // Append to the new tail of the list (a3).
-  scoped_ptr<ClassA> a5(new ClassA);
+  std::unique_ptr<ClassA> a5(new ClassA);
   EXPECT_EQ(3, LeakTracker<ClassA>::NumLiveInstances());
 
   a2.reset();
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index 2250c8f..1c96a56 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -39,5 +39,54 @@
   return stream.str();
 }
 
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+
+size_t TraceStackFramePointers(const void** out_trace,
+                               size_t max_depth,
+                               size_t skip_initial) {
+  // Usage of __builtin_frame_address() enables frame pointers in this
+  // function even if they are not enabled globally. So 'sp' will always
+  // be valid.
+  uintptr_t sp = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+
+  size_t depth = 0;
+  while (depth < max_depth) {
+#if defined(__arm__) && defined(__GNUC__) && !defined(__clang__)
+    // GCC and LLVM generate slightly different frames on ARM, see
+    // https://llvm.org/bugs/show_bug.cgi?id=18505 - LLVM generates
+    // x86-compatible frame, while GCC needs adjustment.
+    sp -= sizeof(uintptr_t);
+#endif
+
+    if (skip_initial != 0) {
+      skip_initial--;
+    } else {
+      out_trace[depth++] = reinterpret_cast<const void**>(sp)[1];
+    }
+
+    // Find out next frame pointer
+    // (heuristics are from TCMalloc's stacktrace functions)
+    {
+      uintptr_t next_sp = reinterpret_cast<const uintptr_t*>(sp)[0];
+
+      // With the stack growing downwards, older stack frame must be
+      // at a greater address than the current one.
+      if (next_sp <= sp) break;
+
+      // Assume stack frames larger than 100,000 bytes are bogus.
+      if (next_sp - sp > 100000) break;
+
+      // Check alignment.
+      if (sp & (sizeof(void*) - 1)) break;
+
+      sp = next_sp;
+    }
+  }
+
+  return depth;
+}
+
+#endif  // HAVE_TRACE_STACK_FRAME_POINTERS
+
 }  // namespace debug
 }  // namespace base
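For reference, a minimal sketch of calling the new frame-pointer tracer; the 32-entry buffer and the helper name are arbitrary:

  #include "base/debug/stack_trace.h"

  #if HAVE_TRACE_STACK_FRAME_POINTERS
  void LogCurrentStackDepth() {
    const void* frames[32];
    // Capture up to 32 return addresses from the current call stack, skipping
    // no initial frames; |depth| is how many entries were actually written.
    size_t depth = base::debug::TraceStackFramePointers(frames, 32, 0);
    (void)depth;
  }
  #endif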
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index 07e119a..23e7b51 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -22,6 +22,14 @@
 struct _CONTEXT;
 #endif
 
+#if defined(OS_POSIX) && ( \
+    defined(__i386__) || defined(__x86_64__) || \
+    (defined(__arm__) && !defined(__thumb__)))
+#define HAVE_TRACE_STACK_FRAME_POINTERS 1
+#else
+#define HAVE_TRACE_STACK_FRAME_POINTERS 0
+#endif
+
 namespace base {
 namespace debug {
 
@@ -93,6 +101,20 @@
   size_t count_;
 };
 
+#if HAVE_TRACE_STACK_FRAME_POINTERS
+// Traces the stack by using frame pointers. This function is faster but less
+// reliable than StackTrace. It should work for debug and profiling builds,
+// but not for release builds (although there are some exceptions).
+//
+// Writes at most |max_depth| frames (instruction pointers) into |out_trace|
+// after skipping |skip_initial| frames. Note that the function itself is not
+// added to the trace so |skip_initial| should be 0 in most cases.
+// Returns number of frames written.
+BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
+                                           size_t max_depth,
+                                           size_t skip_initial);
+#endif  // HAVE_TRACE_STACK_FRAME_POINTERS
+
 namespace internal {
 
 #if defined(OS_POSIX) && !defined(OS_ANDROID)
diff --git a/base/debug/stack_trace_posix.cc b/base/debug/stack_trace_posix.cc
index d6a03f3..3c0299c 100644
--- a/base/debug/stack_trace_posix.cc
+++ b/base/debug/stack_trace_posix.cc
@@ -17,6 +17,7 @@
 #include <unistd.h>
 
 #include <map>
+#include <memory>
 #include <ostream>
 #include <string>
 #include <vector>
@@ -36,7 +37,7 @@
 #include "base/debug/proc_maps_linux.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/free_deleter.h"
 #include "base/memory/singleton.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/posix/eintr_wrapper.h"
@@ -95,7 +96,7 @@
 
     // Try to demangle the mangled symbol candidate.
     int status = 0;
-    scoped_ptr<char, base::FreeDeleter> demangled_symbol(
+    std::unique_ptr<char, base::FreeDeleter> demangled_symbol(
         abi::__cxa_demangle(mangled_symbol.c_str(), NULL, 0, &status));
     if (status == 0) {  // Demangling is successful.
       // Remove the mangled symbol.
@@ -183,8 +184,8 @@
   // Below part is async-signal unsafe (uses malloc), so execute it only
   // when we are not executing the signal handler.
   if (in_signal_handler == 0) {
-    scoped_ptr<char*, FreeDeleter>
-        trace_symbols(backtrace_symbols(trace, size));
+    std::unique_ptr<char*, FreeDeleter> trace_symbols(
+        backtrace_symbols(trace, size));
     if (trace_symbols.get()) {
       for (size_t i = 0; i < size; ++i) {
         std::string trace_symbol = trace_symbols.get()[i];
@@ -214,9 +215,7 @@
   ignore_result(HANDLE_EINTR(write(STDERR_FILENO, output, strlen(output))));
 }
 
-void StackDumpSignalHandler(int signal,
-                            siginfo_t* info,
-                            void* void_context) {
+void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
   (void)void_context;  // unused depending on build context
   // NOTE: This code MUST be async-signal safe.
   // NO malloc or stdio is allowed here.
diff --git a/base/debug/task_annotator.h b/base/debug/task_annotator.h
index 443c71b..2687c5c 100644
--- a/base/debug/task_annotator.h
+++ b/base/debug/task_annotator.h
@@ -39,11 +39,6 @@
   DISALLOW_COPY_AND_ASSIGN(TaskAnnotator);
 };
 
-#define TRACE_TASK_EXECUTION(run_function, task)           \
-  TRACE_EVENT2("toplevel", (run_function), "src_file",     \
-               (task).posted_from.file_name(), "src_func", \
-               (task).posted_from.function_name());
-
 }  // namespace debug
 }  // namespace base
 
diff --git a/base/environment.cc b/base/environment.cc
index adb7387..9eef429 100644
--- a/base/environment.cc
+++ b/base/environment.cc
@@ -35,9 +35,9 @@
     // I.e. HTTP_PROXY may be http_proxy for some users/systems.
     char first_char = variable_name[0];
     std::string alternate_case_var;
-    if (first_char >= 'a' && first_char <= 'z')
+    if (IsAsciiLower(first_char))
       alternate_case_var = ToUpperASCII(variable_name);
-    else if (first_char >= 'A' && first_char <= 'Z')
+    else if (IsAsciiUpper(first_char))
       alternate_case_var = ToLowerASCII(variable_name);
     else
       return false;
@@ -69,7 +69,7 @@
     if (value_length == 0)
       return false;
     if (result) {
-      scoped_ptr<wchar_t[]> value(new wchar_t[value_length]);
+      std::unique_ptr<wchar_t[]> value(new wchar_t[value_length]);
       ::GetEnvironmentVariable(UTF8ToWide(variable_name).c_str(), value.get(),
                                value_length);
       *result = WideToUTF8(value.get());
@@ -184,8 +184,8 @@
 
 #elif defined(OS_POSIX)
 
-scoped_ptr<char*[]> AlterEnvironment(const char* const* const env,
-                                     const EnvironmentMap& changes) {
+std::unique_ptr<char* []> AlterEnvironment(const char* const* const env,
+                                           const EnvironmentMap& changes) {
   std::string value_storage;  // Holds concatenated null-terminated strings.
   std::vector<size_t> result_indices;  // Line indices into value_storage.
 
@@ -218,7 +218,7 @@
   size_t pointer_count_required =
       result_indices.size() + 1 +  // Null-terminated array of pointers.
       (value_storage.size() + sizeof(char*) - 1) / sizeof(char*);  // Buffer.
-  scoped_ptr<char*[]> result(new char*[pointer_count_required]);
+  std::unique_ptr<char* []> result(new char*[pointer_count_required]);
 
   // The string storage goes after the array of pointers.
   char* storage_data = reinterpret_cast<char*>(
diff --git a/base/environment.h b/base/environment.h
index c8811e2..12eeaf7 100644
--- a/base/environment.h
+++ b/base/environment.h
@@ -6,10 +6,10 @@
 #define BASE_ENVIRONMENT_H_
 
 #include <map>
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string16.h"
 #include "build/build_config.h"
 
@@ -79,7 +79,7 @@
 // returned array will have appended to it the storage for the array itself so
 // there is only one pointer to manage, but this means that you can't copy the
 // array without keeping the original around.
-BASE_EXPORT scoped_ptr<char*[]> AlterEnvironment(
+BASE_EXPORT std::unique_ptr<char* []> AlterEnvironment(
     const char* const* env,
     const EnvironmentMap& changes);
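A small sketch of the updated POSIX AlterEnvironment() signature in use; the variable name below is made up:

  #include <memory>

  #include "base/environment.h"

  std::unique_ptr<char*[]> BuildChildEnvironment(
      const char* const* parent_env) {
    base::EnvironmentMap changes;
    changes["LIBCHROME_EXAMPLE_VAR"] = "1";  // Hypothetical variable.
    // The returned null-terminated array and its string storage live in one
    // allocation, so keeping this unique_ptr alive keeps the block valid.
    return base::AlterEnvironment(parent_env, changes);
  }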
 
diff --git a/base/environment_unittest.cc b/base/environment_unittest.cc
index 77e9717..ef264cf 100644
--- a/base/environment_unittest.cc
+++ b/base/environment_unittest.cc
@@ -3,7 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/environment.h"
-#include "base/memory/scoped_ptr.h"
+
+#include <memory>
+
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
@@ -14,14 +16,14 @@
 
 TEST_F(EnvironmentTest, GetVar) {
   // Every setup should have non-empty PATH...
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
   std::string env_value;
   EXPECT_TRUE(env->GetVar("PATH", &env_value));
   EXPECT_NE(env_value, "");
 }
 
 TEST_F(EnvironmentTest, GetVarReverse) {
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
   const char kFooUpper[] = "FOO";
   const char kFooLower[] = "foo";
 
@@ -50,12 +52,12 @@
 
 TEST_F(EnvironmentTest, HasVar) {
   // Every setup should have PATH...
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
   EXPECT_TRUE(env->HasVar("PATH"));
 }
 
 TEST_F(EnvironmentTest, SetVar) {
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
 
   const char kFooUpper[] = "FOO";
   const char kFooLower[] = "foo";
@@ -70,7 +72,7 @@
 }
 
 TEST_F(EnvironmentTest, UnSetVar) {
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
 
   const char kFooUpper[] = "FOO";
   const char kFooLower[] = "foo";
@@ -128,7 +130,7 @@
   const char* const empty[] = { NULL };
   const char* const a2[] = { "A=2", NULL };
   EnvironmentMap changes;
-  scoped_ptr<char*[]> e;
+  std::unique_ptr<char* []> e;
 
   e = AlterEnvironment(empty, changes);
   EXPECT_TRUE(e[0] == NULL);
diff --git a/base/feature_list.cc b/base/feature_list.cc
index d10c60b..4673210 100644
--- a/base/feature_list.cc
+++ b/base/feature_list.cc
@@ -30,12 +30,15 @@
 // are any reserved characters present, returning true if the string is valid.
 // Only called in DCHECKs.
 bool IsValidFeatureOrFieldTrialName(const std::string& name) {
-  return IsStringASCII(name) && name.find_first_of(",<") == std::string::npos;
+  return IsStringASCII(name) && name.find_first_of(",<*") == std::string::npos;
 }
 
 }  // namespace
 
-FeatureList::FeatureList() : initialized_(false) {}
+FeatureList::FeatureList()
+  : initialized_(false),
+    initialized_from_command_line_(false) {
+}
 
 FeatureList::~FeatureList() {}
 
@@ -48,6 +51,8 @@
   // enabled ones (since RegisterOverride() uses insert()).
   RegisterOverridesFromCommandLine(disable_features, OVERRIDE_DISABLE_FEATURE);
   RegisterOverridesFromCommandLine(enable_features, OVERRIDE_ENABLE_FEATURE);
+
+  initialized_from_command_line_ = true;
 }
 
 bool FeatureList::IsFeatureOverriddenFromCommandLine(
@@ -99,9 +104,13 @@
   enable_overrides->clear();
   disable_overrides->clear();
 
+  // Note: Since |overrides_| is a std::map, iteration will be in alphabetical
+  // order. This is not guaranteed to users of this function, but is useful
+  // for tests to assume the order.
   for (const auto& entry : overrides_) {
     std::string* target_list = nullptr;
     switch (entry.second.overridden_state) {
+      case OVERRIDE_USE_DEFAULT:
       case OVERRIDE_ENABLE_FEATURE:
         target_list = enable_overrides;
         break;
@@ -112,6 +121,8 @@
 
     if (!target_list->empty())
       target_list->push_back(',');
+    if (entry.second.overridden_state == OVERRIDE_USE_DEFAULT)
+      target_list->push_back('*');
     target_list->append(entry.first);
     if (entry.second.field_trial) {
       target_list->push_back('<');
@@ -126,16 +137,41 @@
 }
 
 // static
+FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
+  return GetInstance()->GetAssociatedFieldTrial(feature);
+}
+
+// static
 std::vector<std::string> FeatureList::SplitFeatureListString(
     const std::string& input) {
   return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
 }
 
 // static
-void FeatureList::InitializeInstance() {
-  if (g_instance)
-    return;
-  SetInstance(make_scoped_ptr(new FeatureList));
+bool FeatureList::InitializeInstance(const std::string& enable_features,
+                                     const std::string& disable_features) {
+  // We want to initialize a new instance here to support command-line features
+  // in testing better. For example, we initialize a dummy instance in
+  // base/test/test_suite.cc, and override it in content/browser/
+  // browser_main_loop.cc.
+  // On the other hand, we want to avoid re-initialization from command line.
+  // For example, we initialize an instance in chrome/browser/
+  // chrome_browser_main.cc and do not override it in content/browser/
+  // browser_main_loop.cc.
+  bool instance_existed_before = false;
+  if (g_instance) {
+    if (g_instance->initialized_from_command_line_)
+      return false;
+
+    delete g_instance;
+    g_instance = nullptr;
+    instance_existed_before = true;
+  }
+
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+  feature_list->InitializeFromCommandLine(enable_features, disable_features);
+  base::FeatureList::SetInstance(std::move(feature_list));
+  return !instance_existed_before;
 }
 
 // static
@@ -144,7 +180,7 @@
 }
 
 // static
-void FeatureList::SetInstance(scoped_ptr<FeatureList> instance) {
+void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
   DCHECK(!g_instance);
   instance->FinalizeInitialization();
 
@@ -177,12 +213,29 @@
       entry.field_trial->group();
 
     // TODO(asvitkine) Expand this section as more support is added.
-    return entry.overridden_state == OVERRIDE_ENABLE_FEATURE;
+
+    // If marked as OVERRIDE_USE_DEFAULT, simply return the default state below.
+    if (entry.overridden_state != OVERRIDE_USE_DEFAULT)
+      return entry.overridden_state == OVERRIDE_ENABLE_FEATURE;
   }
   // Otherwise, return the default state.
   return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
 }
 
+FieldTrial* FeatureList::GetAssociatedFieldTrial(const Feature& feature) {
+  DCHECK(initialized_);
+  DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
+  DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+  auto it = overrides_.find(feature.name);
+  if (it != overrides_.end()) {
+    const OverrideEntry& entry = it->second;
+    return entry.field_trial;
+  }
+
+  return nullptr;
+}
+
 void FeatureList::RegisterOverridesFromCommandLine(
     const std::string& feature_list,
     OverrideState overridden_state) {
@@ -210,6 +263,10 @@
     DCHECK(IsValidFeatureOrFieldTrialName(field_trial->trial_name()))
         << field_trial->trial_name();
   }
+  if (feature_name.starts_with("*")) {
+    feature_name = feature_name.substr(1);
+    overridden_state = OVERRIDE_USE_DEFAULT;
+  }
 
   // Note: The semantics of insert() is that it does not overwrite the entry if
   // one already exists for the key. Thus, only the first override for a given
diff --git a/base/feature_list.h b/base/feature_list.h
index 875d3b5..2a47427 100644
--- a/base/feature_list.h
+++ b/base/feature_list.h
@@ -6,13 +6,13 @@
 #define BASE_FEATURE_LIST_H_
 
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/base_export.h"
 #include "base/gtest_prod_util.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 #include "base/synchronization/lock.h"
 
@@ -84,13 +84,17 @@
   // enable or disable, respectively. If a feature appears on both lists, then
   // it will be disabled. If a list entry has the format "FeatureName<TrialName"
   // then this initialization will also associate the feature state override
-  // with the named field trial, if it exists. Must only be invoked during the
-  // initialization phase (before FinalizeInitialization() has been called).
+  // with the named field trial, if it exists. If a feature name is prefixed
+  // with the '*' character, it will be created with OVERRIDE_USE_DEFAULT -
+  // which is useful for associating with a trial while using the default state.
+  // Must only be invoked during the initialization phase (before
+  // FinalizeInitialization() has been called).
   void InitializeFromCommandLine(const std::string& enable_features,
                                  const std::string& disable_features);
 
   // Specifies whether a feature override enables or disables the feature.
   enum OverrideState {
+    OVERRIDE_USE_DEFAULT,
     OVERRIDE_DISABLE_FEATURE,
     OVERRIDE_ENABLE_FEATURE,
   };
@@ -125,8 +129,9 @@
   // have been overridden - either through command-line or via FieldTrials. For
   // those features that have an associated FieldTrial, the output entry will be
   // of the format "FeatureName<TrialName", where "TrialName" is the name of the
-  // FieldTrial. Must be called only after the instance has been initialized and
-  // registered.
+  // FieldTrial. Features that have overrides with OVERRIDE_USE_DEFAULT will be
+  // added to |enable_overrides| with a '*' character prefix. Must be called
+  // only after the instance has been initialized and registered.
   void GetFeatureOverrides(std::string* enable_overrides,
                            std::string* disable_overrides);
 
@@ -136,13 +141,21 @@
   // struct, which is checked in builds with DCHECKs enabled.
   static bool IsEnabled(const Feature& feature);
 
+  // Returns the field trial associated with the given |feature|. Must only be
+  // called after the singleton instance has been registered via SetInstance().
+  static FieldTrial* GetFieldTrial(const Feature& feature);
+
   // Splits a comma-separated string containing feature names into a vector.
   static std::vector<std::string> SplitFeatureListString(
       const std::string& input);
 
-  // Initializes and sets a default instance of FeatureList if one has not yet
-  // already been set. No-op otherwise.
-  static void InitializeInstance();
+  // Initializes and sets an instance of FeatureList with feature overrides via
+  // command-line flags |enable_features| and |disable_features| if one has not
+  // already been set from command-line flags. Returns true if an instance did
+  // not previously exist. See InitializeFromCommandLine() for more details
+  // about |enable_features| and |disable_features| parameters.
+  static bool InitializeInstance(const std::string& enable_features,
+                                 const std::string& disable_features);
 
   // Returns the singleton instance of FeatureList. Will return null until an
   // instance is registered via SetInstance().
@@ -150,7 +163,7 @@
 
   // Registers the given |instance| to be the singleton feature list for this
   // process. This should only be called once and |instance| must not be null.
-  static void SetInstance(scoped_ptr<FeatureList> instance);
+  static void SetInstance(std::unique_ptr<FeatureList> instance);
 
   // Clears the previously-registered singleton instance for tests.
   static void ClearInstanceForTesting();
@@ -191,6 +204,12 @@
   // Requires the FeatureList to have already been fully initialized.
   bool IsFeatureEnabled(const Feature& feature);
 
+  // Returns the field trial associated with the given |feature|. This is
+  // invoked by the public FeatureList::GetFieldTrial() static function on the
+  // global singleton. Requires the FeatureList to have already been fully
+  // initialized.
+  base::FieldTrial* GetAssociatedFieldTrial(const Feature& feature);
+
   // For each feature name in comma-separated list of strings |feature_list|,
   // registers an override with the specified |overridden_state|. Also, will
   // associate an optional named field trial if the entry is of the format
@@ -230,6 +249,9 @@
   // result of FinalizeInitialization().
   bool initialized_;
 
+  // Whether this object has been initialized from command line.
+  bool initialized_from_command_line_;
+
   DISALLOW_COPY_AND_ASSIGN(FeatureList);
 };
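To illustrate the new '*' command-line syntax and InitializeInstance() (feature and trial names here are invented): a '*'-prefixed entry keeps the feature's default state while associating it with the named trial.

  #include "base/feature_list.h"

  // An example feature that stays disabled unless explicitly overridden.
  const base::Feature kExampleFeature{"ExampleFeature",
                                      base::FEATURE_DISABLED_BY_DEFAULT};

  void InitFeatureListFromFlags() {
    // "*ExampleFeature<ExampleTrial" registers an OVERRIDE_USE_DEFAULT entry
    // tied to the "ExampleTrial" field trial; plain names enable or disable.
    base::FeatureList::InitializeInstance("*ExampleFeature<ExampleTrial",
                                          "" /* disable_features */);
    // Still false: OVERRIDE_USE_DEFAULT preserves the declared default state.
    bool enabled = base::FeatureList::IsEnabled(kExampleFeature);
    (void)enabled;
  }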
 
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
index 11cf179..a7e7b71 100644
--- a/base/feature_list_unittest.cc
+++ b/base/feature_list_unittest.cc
@@ -6,10 +6,12 @@
 
 #include <stddef.h>
 
+#include <algorithm>
 #include <utility>
 
 #include "base/format_macros.h"
 #include "base/macros.h"
+#include "base/memory/ptr_util.h"
 #include "base/metrics/field_trial.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
@@ -41,11 +43,11 @@
 class FeatureListTest : public testing::Test {
  public:
   FeatureListTest() : feature_list_(nullptr) {
-    RegisterFeatureListInstance(make_scoped_ptr(new FeatureList));
+    RegisterFeatureListInstance(WrapUnique(new FeatureList));
   }
   ~FeatureListTest() override { ClearFeatureListInstance(); }
 
-  void RegisterFeatureListInstance(scoped_ptr<FeatureList> feature_list) {
+  void RegisterFeatureListInstance(std::unique_ptr<FeatureList> feature_list) {
     FeatureList::ClearInstanceForTesting();
     feature_list_ = feature_list.get();
     FeatureList::SetInstance(std::move(feature_list));
@@ -92,7 +94,7 @@
                                     test_case.disable_features));
 
     ClearFeatureListInstance();
-    scoped_ptr<FeatureList> feature_list(new FeatureList);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
     feature_list->InitializeFromCommandLine(test_case.enable_features,
                                             test_case.disable_features);
     RegisterFeatureListInstance(std::move(feature_list));
@@ -148,7 +150,7 @@
     ClearFeatureListInstance();
 
     FieldTrialList field_trial_list(nullptr);
-    scoped_ptr<FeatureList> feature_list(new FeatureList);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
 
     FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
     FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
@@ -178,11 +180,40 @@
   }
 }
 
+TEST_F(FeatureListTest, FieldTrialAssociateUseDefault) {
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+
+  FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+  FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial1);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial2);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  // Initially, neither trial should be active.
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+  // Check the feature enabled state is its default.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  // The above should have activated |trial1|.
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+  // Check the feature enabled state is its default.
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  // The above should have activated |trial2|.
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+}
+
 TEST_F(FeatureListTest, CommandLineTakesPrecedenceOverFieldTrial) {
   ClearFeatureListInstance();
 
   FieldTrialList field_trial_list(nullptr);
-  scoped_ptr<FeatureList> feature_list(new FeatureList);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
 
   // The feature is explicitly enabled on the command-line.
   feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
@@ -206,7 +237,7 @@
   ClearFeatureListInstance();
 
   FieldTrialList field_trial_list(nullptr);
-  scoped_ptr<FeatureList> feature_list(new FeatureList);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
 
   // No features are overridden from the command line yet
   EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
@@ -281,7 +312,7 @@
     ClearFeatureListInstance();
 
     FieldTrialList field_trial_list(nullptr);
-    scoped_ptr<FeatureList> feature_list(new FeatureList);
+    std::unique_ptr<FeatureList> feature_list(new FeatureList);
     feature_list->InitializeFromCommandLine(test_case.enable_features,
                                             test_case.disable_features);
 
@@ -324,7 +355,7 @@
 TEST_F(FeatureListTest, GetFeatureOverrides) {
   ClearFeatureListInstance();
   FieldTrialList field_trial_list(nullptr);
-  scoped_ptr<FeatureList> feature_list(new FeatureList);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
   feature_list->InitializeFromCommandLine("A,X", "D");
 
   FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
@@ -342,11 +373,44 @@
   EXPECT_EQ("D", SortFeatureListString(disable_features));
 }
 
+TEST_F(FeatureListTest, GetFeatureOverrides_UseDefault) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,X", "D");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial);
+
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  std::string enable_features;
+  std::string disable_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enable_features,
+                                                  &disable_features);
+  EXPECT_EQ("*OffByDefault<Trial,A,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+}
+
+TEST_F(FeatureListTest, GetFieldTrial) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_EQ(trial, FeatureList::GetFieldTrial(kFeatureOnByDefault));
+  EXPECT_EQ(nullptr, FeatureList::GetFieldTrial(kFeatureOffByDefault));
+}
+
 TEST_F(FeatureListTest, InitializeFromCommandLine_WithFieldTrials) {
   ClearFeatureListInstance();
   FieldTrialList field_trial_list(nullptr);
   FieldTrialList::CreateFieldTrial("Trial", "Group");
-  scoped_ptr<FeatureList> feature_list(new FeatureList);
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
   feature_list->InitializeFromCommandLine("A,OffByDefault<Trial,X", "D");
   RegisterFeatureListInstance(std::move(feature_list));
 
@@ -355,4 +419,42 @@
   EXPECT_TRUE(FieldTrialList::IsTrialActive("Trial"));
 }
 
+TEST_F(FeatureListTest, InitializeFromCommandLine_UseDefault) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("T1", "Group");
+  FieldTrialList::CreateFieldTrial("T2", "Group");
+  std::unique_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine(
+      "A,*OffByDefault<T1,*OnByDefault<T2,X", "D");
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("T1"));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("T1"));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("T2"));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("T2"));
+}
+
+TEST_F(FeatureListTest, InitializeInstance) {
+  ClearFeatureListInstance();
+
+  std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+  FeatureList::SetInstance(std::move(feature_list));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+
+  // Initialize from command line if we haven't yet.
+  FeatureList::InitializeInstance("", kFeatureOnByDefaultName);
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+
+  // Do not initialize from commandline if we have already.
+  FeatureList::InitializeInstance(kFeatureOffByDefaultName, "");
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
 }  // namespace base
diff --git a/base/file_version_info.h b/base/file_version_info.h
index 8c1bf92..3b9457c 100644
--- a/base/file_version_info.h
+++ b/base/file_version_info.h
@@ -5,18 +5,15 @@
 #ifndef BASE_FILE_VERSION_INFO_H_
 #define BASE_FILE_VERSION_INFO_H_
 
+#include <string>
+
 #include "build/build_config.h"
+#include "base/base_export.h"
+#include "base/strings/string16.h"
 
 #if defined(OS_WIN)
 #include <windows.h>
-// http://blogs.msdn.com/oldnewthing/archive/2004/10/25/247180.aspx
-extern "C" IMAGE_DOS_HEADER __ImageBase;
-#endif  // OS_WIN
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/strings/string16.h"
+#endif
 
 namespace base {
 class FilePath;
@@ -32,17 +29,6 @@
 // version returns values from the Info.plist as appropriate. TODO(avi): make
 // this a less-obvious Windows-ism.
 
-#if defined(OS_WIN)
-// Creates a FileVersionInfo for the current module. Returns NULL in case of
-// error. The returned object should be deleted when you are done with it. This
-// is done as a macro to force inlining of __ImageBase. It used to be inside of
-// a method labeled with __forceinline, but inlining through __forceinline
-// stopped working for Debug builds in VS2013 (http://crbug.com/516359).
-#define CREATE_FILE_VERSION_INFO_FOR_CURRENT_MODULE() \
-    FileVersionInfo::CreateFileVersionInfoForModule( \
-        reinterpret_cast<HMODULE>(&__ImageBase))
-#endif
-
 class BASE_EXPORT FileVersionInfo {
  public:
   virtual ~FileVersionInfo() {}
@@ -57,8 +43,6 @@
 #if defined(OS_WIN)
   // Creates a FileVersionInfo for the specified module. Returns NULL in case
   // of error. The returned object should be deleted when you are done with it.
-  // See CREATE_FILE_VERSION_INFO_FOR_CURRENT_MODULE() helper above for a
-  // CreateFileVersionInfoForCurrentModule() alternative for Windows.
   static FileVersionInfo* CreateFileVersionInfoForModule(HMODULE module);
 #else
   // Creates a FileVersionInfo for the current module. Returns NULL in case
diff --git a/base/file_version_info_unittest.cc b/base/file_version_info_unittest.cc
index 66e298d..67edc77 100644
--- a/base/file_version_info_unittest.cc
+++ b/base/file_version_info_unittest.cc
@@ -2,12 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/file_version_info.h"
+
 #include <stddef.h>
 
-#include "base/file_version_info.h"
+#include <memory>
+
 #include "base/files/file_path.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -60,7 +63,7 @@
   FilePath dll_path = GetTestDataPath();
   dll_path = dll_path.Append(kDLLName);
 
-  scoped_ptr<FileVersionInfo> version_info(
+  std::unique_ptr<FileVersionInfo> version_info(
       FileVersionInfo::CreateFileVersionInfo(dll_path));
 
   int j = 0;
@@ -101,7 +104,7 @@
     FilePath dll_path = GetTestDataPath();
     dll_path = dll_path.Append(kDLLNames[i]);
 
-    scoped_ptr<FileVersionInfo> version_info(
+    std::unique_ptr<FileVersionInfo> version_info(
         FileVersionInfo::CreateFileVersionInfo(dll_path));
 
     EXPECT_EQ(kExpected[i], version_info->is_official_build());
@@ -114,7 +117,7 @@
   FilePath dll_path = GetTestDataPath();
   dll_path = dll_path.AppendASCII("FileVersionInfoTest1.dll");
 
-  scoped_ptr<FileVersionInfo> version_info(
+  std::unique_ptr<FileVersionInfo> version_info(
       FileVersionInfo::CreateFileVersionInfo(dll_path));
 
   // Test few existing properties.
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index d3cb53d..4adfa27 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -44,7 +44,7 @@
 // otherwise returns npos.  This can only be true on Windows, when a pathname
 // begins with a letter followed by a colon.  On other platforms, this always
 // returns npos.
-StringPieceType::size_type FindDriveLetter(StringPieceType /* path */) {
+StringPieceType::size_type FindDriveLetter(StringPieceType path) {
 #if defined(FILE_PATH_USES_DRIVE_LETTERS)
   // This is dependent on an ASCII-based character set, but that's a
   // reasonable assumption.  iswalpha can be too inclusive here.
@@ -53,6 +53,8 @@
        (path[0] >= L'a' && path[0] <= L'z'))) {
     return 1;
   }
+#else
+  (void)path;  // Avoid an unused warning.
 #endif  // FILE_PATH_USES_DRIVE_LETTERS
   return StringType::npos;
 }
@@ -1192,6 +1194,7 @@
 }
 
 StringType FilePath::GetHFSDecomposedForm(StringPieceType string) {
+  StringType result;
   ScopedCFTypeRef<CFStringRef> cfstring(
       CFStringCreateWithBytesNoCopy(
           NULL,
@@ -1200,26 +1203,27 @@
           kCFStringEncodingUTF8,
           false,
           kCFAllocatorNull));
-  // Query the maximum length needed to store the result. In most cases this
-  // will overestimate the required space. The return value also already
-  // includes the space needed for a terminating 0.
-  CFIndex length = CFStringGetMaximumSizeOfFileSystemRepresentation(cfstring);
-  DCHECK_GT(length, 0);  // should be at least 1 for the 0-terminator.
-  // Reserve enough space for CFStringGetFileSystemRepresentation to write into.
-  // Also set the length to the maximum so that we can shrink it later.
-  // (Increasing rather than decreasing it would clobber the string contents!)
-  StringType result;
-  result.reserve(length);
-  result.resize(length - 1);
-  Boolean success = CFStringGetFileSystemRepresentation(cfstring,
-                                                        &result[0],
-                                                        length);
-  if (success) {
-    // Reduce result.length() to actual string length.
-    result.resize(strlen(result.c_str()));
-  } else {
-    // An error occurred -> clear result.
-    result.clear();
+  if (cfstring) {
+    // Query the maximum length needed to store the result. In most cases this
+    // will overestimate the required space. The return value also already
+    // includes the space needed for a terminating 0.
+    CFIndex length = CFStringGetMaximumSizeOfFileSystemRepresentation(cfstring);
+    DCHECK_GT(length, 0);  // should be at least 1 for the 0-terminator.
+    // Reserve enough space for CFStringGetFileSystemRepresentation to write
+    // into. Also set the length to the maximum so that we can shrink it later.
+    // (Increasing rather than decreasing it would clobber the string contents!)
+    result.reserve(length);
+    result.resize(length - 1);
+    Boolean success = CFStringGetFileSystemRepresentation(cfstring,
+                                                          &result[0],
+                                                          length);
+    if (success) {
+      // Reduce result.length() to actual string length.
+      result.resize(strlen(result.c_str()));
+    } else {
+      // An error occurred -> clear result.
+      result.clear();
+    }
   }
   return result;
 }
@@ -1307,7 +1311,7 @@
   return NormalizePathSeparatorsTo(kSeparators[0]);
 }
 
-FilePath FilePath::NormalizePathSeparatorsTo(CharType /* separator */) const {
+FilePath FilePath::NormalizePathSeparatorsTo(CharType separator) const {
 #if defined(FILE_PATH_USES_WIN_SEPARATORS)
   DCHECK_NE(kSeparators + kSeparatorsLength,
             std::find(kSeparators, kSeparators + kSeparatorsLength, separator));
@@ -1317,6 +1321,7 @@
   }
   return FilePath(copy);
 #else
+  (void)separator;  // Avoid an unused warning.
   return *this;
 #endif
 }
diff --git a/base/files/file_path_unittest.cc b/base/files/file_path_unittest.cc
index b1d93a8..d8c5969 100644
--- a/base/files/file_path_unittest.cc
+++ b/base/files/file_path_unittest.cc
@@ -1296,4 +1296,23 @@
   EXPECT_EQ("foo", ss.str());
 }
 
+// Test GetHFSDecomposedForm should return empty result for invalid UTF-8
+// strings.
+#if defined(OS_MACOSX)
+TEST_F(FilePathTest, GetHFSDecomposedFormWithInvalidInput) {
+  const FilePath::CharType* cases[] = {
+    FPL("\xc3\x28"),
+    FPL("\xe2\x82\x28"),
+    FPL("\xe2\x28\xa1"),
+    FPL("\xf0\x28\x8c\xbc"),
+    FPL("\xf0\x28\x8c\x28"),
+  };
+  for (const auto& invalid_input : cases) {
+    FilePath::StringType observed = FilePath::GetHFSDecomposedForm(
+        invalid_input);
+    EXPECT_TRUE(observed.empty());
+  }
+}
+#endif
+
 }  // namespace base
diff --git a/base/files/file_path_watcher.cc b/base/files/file_path_watcher.cc
index 955e6a2..a4624ab 100644
--- a/base/files/file_path_watcher.cc
+++ b/base/files/file_path_watcher.cc
@@ -11,10 +11,6 @@
 #include "base/message_loop/message_loop.h"
 #include "build/build_config.h"
 
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-#include "base/mac/mac_util.h"
-#endif
-
 namespace base {
 
 FilePathWatcher::~FilePathWatcher() {
@@ -29,13 +25,11 @@
 
 // static
 bool FilePathWatcher::RecursiveWatchAvailable() {
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // FSEvents isn't available on iOS and is broken on OSX 10.6 and earlier.
-  // See http://crbug.com/54822#c31
-  return mac::IsOSLionOrLater();
-#elif defined(OS_WIN) || defined(OS_LINUX) || defined(OS_ANDROID)
+#if (defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_WIN) || \
+    defined(OS_LINUX) || defined(OS_ANDROID)
   return true;
 #else
+  // FSEvents isn't available on iOS.
   return false;
 #endif
 }
diff --git a/base/files/file_path_watcher_fsevents.cc b/base/files/file_path_watcher_fsevents.cc
index 78637aa..824e3d8 100644
--- a/base/files/file_path_watcher_fsevents.cc
+++ b/base/files/file_path_watcher_fsevents.cc
@@ -14,7 +14,7 @@
 #include "base/mac/scoped_cftyperef.h"
 #include "base/macros.h"
 #include "base/message_loop/message_loop.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
diff --git a/base/files/file_path_watcher_kqueue.cc b/base/files/file_path_watcher_kqueue.cc
index b6e61ab..6d034cd 100644
--- a/base/files/file_path_watcher_kqueue.cc
+++ b/base/files/file_path_watcher_kqueue.cc
@@ -12,7 +12,7 @@
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/strings/stringprintf.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 // On some platforms these are not defined.
 #if !defined(EV_RECEIPT)
diff --git a/base/files/file_path_watcher_linux.cc b/base/files/file_path_watcher_linux.cc
index a75eaba..ae293fe 100644
--- a/base/files/file_path_watcher_linux.cc
+++ b/base/files/file_path_watcher_linux.cc
@@ -14,6 +14,7 @@
 
 #include <algorithm>
 #include <map>
+#include <memory>
 #include <set>
 #include <utility>
 #include <vector>
@@ -27,13 +28,12 @@
 #include "base/location.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/single_thread_task_runner.h"
 #include "base/stl_util.h"
 #include "base/synchronization/lock.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
 
 namespace base {
diff --git a/base/files/file_path_watcher_unittest.cc b/base/files/file_path_watcher_unittest.cc
index a860b13..c85a50a 100644
--- a/base/files/file_path_watcher_unittest.cc
+++ b/base/files/file_path_watcher_unittest.cc
@@ -28,8 +28,8 @@
 #include "base/synchronization/waitable_event.h"
 #include "base/test/test_file_util.h"
 #include "base/test/test_timeouts.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -227,7 +227,7 @@
 // Basic test: Create the file and verify that we notice.
 TEST_F(FilePathWatcherTest, NewFile) {
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   ASSERT_TRUE(WriteFile(test_file(), "content"));
@@ -240,7 +240,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
 
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the file is modified.
@@ -255,7 +255,7 @@
   ASSERT_TRUE(WriteFile(source_file, "content"));
 
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the file is modified.
@@ -268,7 +268,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
 
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the file is deleted.
@@ -296,7 +296,7 @@
   FilePathWatcher* watcher() const { return watcher_.get(); }
 
  private:
-  scoped_ptr<FilePathWatcher> watcher_;
+  std::unique_ptr<FilePathWatcher> watcher_;
   MessageLoop* loop_;
 
   DISALLOW_COPY_AND_ASSIGN(Deleter);
@@ -306,7 +306,7 @@
 TEST_F(FilePathWatcherTest, DeleteDuringNotify) {
   FilePathWatcher* watcher = new FilePathWatcher;
   // Takes ownership of watcher.
-  scoped_ptr<Deleter> deleter(new Deleter(watcher, &loop_));
+  std::unique_ptr<Deleter> deleter(new Deleter(watcher, &loop_));
   ASSERT_TRUE(SetupWatch(test_file(), watcher, deleter.get(), false));
 
   ASSERT_TRUE(WriteFile(test_file(), "content"));
@@ -321,7 +321,7 @@
 // notification.
 // Flaky on MacOS (and ARM linux): http://crbug.com/85930
 TEST_F(FilePathWatcherTest, DISABLED_DestroyWithPendingNotification) {
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   FilePathWatcher* watcher = new FilePathWatcher;
   ASSERT_TRUE(SetupWatch(test_file(), watcher, delegate.get(), false));
   ASSERT_TRUE(WriteFile(test_file(), "content"));
@@ -331,8 +331,8 @@
 
 TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
   FilePathWatcher watcher1, watcher2;
-  scoped_ptr<TestDelegate> delegate1(new TestDelegate(collector()));
-  scoped_ptr<TestDelegate> delegate2(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate1(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate2(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher1, delegate1.get(), false));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher2, delegate2.get(), false));
 
@@ -348,7 +348,7 @@
   FilePathWatcher watcher;
   FilePath dir(temp_dir_.path().AppendASCII("dir"));
   FilePath file(dir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
 
   ASSERT_TRUE(base::CreateDirectory(dir));
@@ -381,7 +381,7 @@
 
   FilePathWatcher watcher;
   FilePath file(path.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
 
   FilePath sub_path(temp_dir_.path());
@@ -411,7 +411,7 @@
   FilePath file(dir.AppendASCII("file"));
   ASSERT_TRUE(base::CreateDirectory(dir));
   ASSERT_TRUE(WriteFile(file, "content"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &watcher, delegate.get(), false));
 
   ASSERT_TRUE(base::DeleteFile(dir, true));
@@ -423,7 +423,7 @@
 TEST_F(FilePathWatcherTest, DeleteAndRecreate) {
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   ASSERT_TRUE(base::DeleteFile(test_file(), false));
@@ -441,7 +441,7 @@
   FilePath dir(temp_dir_.path().AppendASCII("dir"));
   FilePath file1(dir.AppendASCII("file1"));
   FilePath file2(dir.AppendASCII("file2"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(dir, &watcher, delegate.get(), false));
 
   ASSERT_TRUE(base::CreateDirectory(dir));
@@ -476,9 +476,9 @@
   FilePath dest(temp_dir_.path().AppendASCII("dest"));
   FilePath subdir(dir.AppendASCII("subdir"));
   FilePath file(subdir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(file, &file_watcher, file_delegate.get(), false));
-  scoped_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(subdir, &subdir_watcher, subdir_delegate.get(),
                          false));
 
@@ -499,7 +499,7 @@
 TEST_F(FilePathWatcherTest, RecursiveWatch) {
   FilePathWatcher watcher;
   FilePath dir(temp_dir_.path().AppendASCII("dir"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   bool setup_result = SetupWatch(dir, &watcher, delegate.get(), true);
   if (!FilePathWatcher::RecursiveWatchAvailable()) {
     ASSERT_FALSE(setup_result);
@@ -579,7 +579,7 @@
   FilePath test_dir(temp_dir_.path().AppendASCII("test_dir"));
   ASSERT_TRUE(base::CreateDirectory(test_dir));
   FilePath symlink(test_dir.AppendASCII("symlink"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(symlink, &watcher, delegate.get(), true));
 
   // Link creation.
@@ -626,9 +626,9 @@
   ASSERT_TRUE(base::CreateDirectory(source_subdir));
   ASSERT_TRUE(WriteFile(source_file, "content"));
 
-  scoped_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> file_delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(dest_file, &file_watcher, file_delegate.get(), false));
-  scoped_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> subdir_delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(dest_subdir, &subdir_watcher, subdir_delegate.get(),
                          false));
 
@@ -651,7 +651,7 @@
 TEST_F(FilePathWatcherTest, FileAttributesChanged) {
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the file is modified.
@@ -665,7 +665,7 @@
 // Verify that creating a symlink is caught.
 TEST_F(FilePathWatcherTest, CreateLink) {
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Note that we are watching the symlink
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
@@ -683,7 +683,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
   // Now make sure we get notified if the link is deleted.
@@ -698,7 +698,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Note that we are watching the symlink.
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
@@ -713,7 +713,7 @@
 TEST_F(FilePathWatcherTest, CreateTargetLinkedFile) {
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Note that we are watching the symlink.
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
@@ -729,7 +729,7 @@
   ASSERT_TRUE(WriteFile(test_file(), "content"));
   ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Note that we are watching the symlink.
   ASSERT_TRUE(SetupWatch(test_link(), &watcher, delegate.get(), false));
 
@@ -747,7 +747,7 @@
   FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // dir/file should exist.
   ASSERT_TRUE(base::CreateDirectory(dir));
   ASSERT_TRUE(WriteFile(file, "content"));
@@ -776,7 +776,7 @@
   FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   // Now create the link from dir.lnk pointing to dir but
   // neither dir nor dir/file exist yet.
   ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
@@ -806,7 +806,7 @@
   FilePath link_dir(temp_dir_.path().AppendASCII("dir.lnk"));
   FilePath file(dir.AppendASCII("file"));
   FilePath linkfile(link_dir.AppendASCII("file"));
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(base::CreateDirectory(dir));
   ASSERT_TRUE(CreateSymbolicLink(dir, link_dir));
   // Note that we are watching dir.lnk/file but the file doesn't exist yet.
@@ -883,7 +883,7 @@
   ASSERT_TRUE(WriteFile(test_file, "content"));
 
   FilePathWatcher watcher;
-  scoped_ptr<TestDelegate> delegate(new TestDelegate(collector()));
+  std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
   ASSERT_TRUE(SetupWatch(test_file, &watcher, delegate.get(), false));
 
   // We should not get notified in this case as it hasn't affected our ability
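The file_path_watcher_unittest.cc hunks above are a mechanical scoped_ptr to std::unique_ptr migration; ownership and lifetime semantics are unchanged. A minimal sketch of the same pattern in isolation, with a hypothetical Delegate type standing in for TestDelegate:

    #include <memory>

    struct Delegate {
      void OnFilePathChanged() {}
    };

    int main() {
      // Previously: scoped_ptr<Delegate> delegate(new Delegate);
      std::unique_ptr<Delegate> delegate(new Delegate);
      delegate->OnFilePathChanged();
      return 0;  // |delegate| is destroyed automatically here.
    }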
diff --git a/base/files/file_util.cc b/base/files/file_util.cc
index 3169370..80fa44f 100644
--- a/base/files/file_util.cc
+++ b/base/files/file_util.cc
@@ -137,7 +137,7 @@
   }
 
   const size_t kBufferSize = 1 << 16;
-  scoped_ptr<char[]> buf(new char[kBufferSize]);
+  std::unique_ptr<char[]> buf(new char[kBufferSize]);
   size_t len;
   size_t size = 0;
   bool read_status = true;
diff --git a/base/files/file_util.h b/base/files/file_util.h
index 05b3cbf..8fd9fff 100644
--- a/base/files/file_util.h
+++ b/base/files/file_util.h
@@ -19,7 +19,6 @@
 #include "base/base_export.h"
 #include "base/files/file.h"
 #include "base/files/file_path.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string16.h"
 #include "build/build_config.h"
 
@@ -289,6 +288,10 @@
 // be resolved with this function.
 BASE_EXPORT bool NormalizeToNativeFilePath(const FilePath& path,
                                            FilePath* nt_path);
+
+// Given an existing file in |path|, returns whether this file is on a network
+// drive or not. If |path| does not exist, this function returns false.
+BASE_EXPORT bool IsOnNetworkDrive(const base::FilePath& path);
 #endif
 
 // This function will return whether the given file is a symlink or not.
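IsOnNetworkDrive() is declared inside the Windows-only block that also holds NormalizeToNativeFilePath(). A hedged usage sketch; ShouldMemoryMap() and its skip-network-drives policy are illustrative only and not part of this patch:

    #include "base/files/file_path.h"
    #include "base/files/file_util.h"
    #include "build/build_config.h"

    // Hypothetical policy helper: avoid mapping files on a network drive,
    // where page-ins can stall on the network.
    bool ShouldMemoryMap(const base::FilePath& path) {
    #if defined(OS_WIN)
      if (base::IsOnNetworkDrive(path))
        return false;
    #endif
      return base::PathExists(path);
    }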
diff --git a/base/files/file_util_linux.cc b/base/files/file_util_linux.cc
index 7999392..b0c6e03 100644
--- a/base/files/file_util_linux.cc
+++ b/base/files/file_util_linux.cc
@@ -24,6 +24,9 @@
 
   // Not all possible |statfs_buf.f_type| values are in linux/magic.h.
   // Missing values are copied from the statfs man page.
+  // On some platforms, |statfs_buf.f_type| is declared as signed, but some of
+  // the values will overflow it, causing narrowing warnings. Cast to the
+  // largest possible unsigned integer type to avoid them.
   switch (static_cast<uintmax_t>(statfs_buf.f_type)) {
     case 0:
       *type = FILE_SYSTEM_0;
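The new comment in file_util_linux.cc explains why |statfs_buf.f_type| is widened before the switch. A standalone illustration of the same cast; the two magic numbers are the real TMPFS and BTRFS constants, used here purely as examples:

    #include <cstdint>
    #include <cstdio>

    int main() {
      long f_type = 0x01021994;  // TMPFS_MAGIC; this field is signed on some platforms.
      // Widening to uintmax_t keeps case labels such as 0x9123683E (BTRFS)
      // from triggering narrowing or sign-comparison warnings.
      switch (static_cast<uintmax_t>(f_type)) {
        case 0x01021994:
          std::printf("tmpfs\n");
          break;
        case 0x9123683E:
          std::printf("btrfs\n");
          break;
        default:
          std::printf("other\n");
          break;
      }
      return 0;
    }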
diff --git a/base/files/file_util_posix.cc b/base/files/file_util_posix.cc
index 3f5baa0..599759a 100644
--- a/base/files/file_util_posix.cc
+++ b/base/files/file_util_posix.cc
@@ -27,7 +27,6 @@
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/singleton.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/stl_util.h"
@@ -560,7 +559,7 @@
   return CreateTemporaryDirInDirImpl(base_dir, mkdtemp_template, new_dir);
 }
 
-bool CreateNewTempDirectory(const FilePath::StringType& /* prefix */,
+bool CreateNewTempDirectory(const FilePath::StringType& /*prefix*/,
                             FilePath* new_temp_path) {
   FilePath tmpdir;
   if (!GetTempDir(&tmpdir))
@@ -849,7 +848,7 @@
     return true;
   }
 #else
-  (void)executable;  // avoid unused warning when !defined(OS_LINUX)
+  (void)executable;  // Avoid unused warning when !defined(OS_LINUX).
 #endif
   return GetTempDir(path);
 }
diff --git a/base/files/important_file_writer.cc b/base/files/important_file_writer.cc
index b429305..28550ad 100644
--- a/base/files/important_file_writer.cc
+++ b/base/files/important_file_writer.cc
@@ -49,15 +49,16 @@
 };
 
 void LogFailure(const FilePath& path, TempFileFailure failure_code,
-                const std::string& message) {
+                StringPiece message) {
   UMA_HISTOGRAM_ENUMERATION("ImportantFile.TempFileFailures", failure_code,
                             TEMP_FILE_FAILURE_MAX);
   DPLOG(WARNING) << "temp file failure: " << path.value() << " : " << message;
 }
 
-// Helper function to call WriteFileAtomically() with a scoped_ptr<std::string>.
+// Helper function to call WriteFileAtomically() with a
+// std::unique_ptr<std::string>.
 bool WriteScopedStringToFileAtomically(const FilePath& path,
-                                       scoped_ptr<std::string> data) {
+                                       std::unique_ptr<std::string> data) {
   return ImportantFileWriter::WriteFileAtomically(path, *data);
 }
 
@@ -65,7 +66,7 @@
 
 // static
 bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
-                                              const std::string& data) {
+                                              StringPiece data) {
 #if defined(OS_CHROMEOS)
   // On Chrome OS, chrome gets killed when it cannot finish shutdown quickly,
   // and this function seems to be one of the slowest shutdown steps.
@@ -126,19 +127,18 @@
 
 ImportantFileWriter::ImportantFileWriter(
     const FilePath& path,
-    const scoped_refptr<SequencedTaskRunner>& task_runner)
+    scoped_refptr<SequencedTaskRunner> task_runner)
     : ImportantFileWriter(
-        path,
-        task_runner,
-        TimeDelta::FromMilliseconds(kDefaultCommitIntervalMs)) {
-}
+          path,
+          std::move(task_runner),
+          TimeDelta::FromMilliseconds(kDefaultCommitIntervalMs)) {}
 
 ImportantFileWriter::ImportantFileWriter(
     const FilePath& path,
-    const scoped_refptr<SequencedTaskRunner>& task_runner,
+    scoped_refptr<SequencedTaskRunner> task_runner,
     TimeDelta interval)
     : path_(path),
-      task_runner_(task_runner),
+      task_runner_(std::move(task_runner)),
       serializer_(nullptr),
       commit_interval_(interval),
       weak_factory_(this) {
@@ -158,7 +158,7 @@
   return timer_.IsRunning();
 }
 
-void ImportantFileWriter::WriteNow(scoped_ptr<std::string> data) {
+void ImportantFileWriter::WriteNow(std::unique_ptr<std::string> data) {
   DCHECK(CalledOnValidThread());
   if (!IsValueInRangeForNumericType<int32_t>(data->length())) {
     NOTREACHED();
@@ -193,7 +193,7 @@
 
 void ImportantFileWriter::DoScheduledWrite() {
   DCHECK(serializer_);
-  scoped_ptr<std::string> data(new std::string);
+  std::unique_ptr<std::string> data(new std::string);
   if (serializer_->SerializeData(data.get())) {
     WriteNow(std::move(data));
   } else {
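The constructors above now take the scoped_refptr<SequencedTaskRunner> by value and std::move() it into the member, which saves a reference-count round trip when the caller passes an rvalue. A minimal sketch of the same parameter-passing pattern with a hypothetical Widget class:

    #include <utility>

    #include "base/memory/ref_counted.h"
    #include "base/sequenced_task_runner.h"

    class Widget {
     public:
      // Taking the scoped_refptr by value lets callers std::move() theirs in.
      explicit Widget(scoped_refptr<base::SequencedTaskRunner> task_runner)
          : task_runner_(std::move(task_runner)) {}

     private:
      scoped_refptr<base::SequencedTaskRunner> task_runner_;
    };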
diff --git a/base/files/important_file_writer.h b/base/files/important_file_writer.h
index 1b2ad5c..0bd8a7f 100644
--- a/base/files/important_file_writer.h
+++ b/base/files/important_file_writer.h
@@ -12,6 +12,7 @@
 #include "base/files/file_path.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
 #include "base/threading/non_thread_safe.h"
 #include "base/time/time.h"
 #include "base/timer/timer.h"
@@ -36,7 +37,7 @@
 // temporary file to target filename.
 //
 // If you want to know more about this approach and ext3/ext4 fsync issues, see
-// http://valhenson.livejournal.com/37921.html
+// http://blog.valerieaurora.org/2009/04/16/dont-panic-fsync-ext34-and-your-data/
 class BASE_EXPORT ImportantFileWriter : public NonThreadSafe {
  public:
   // Used by ScheduleSave to lazily provide the data to be saved. Allows us
@@ -54,8 +55,7 @@
 
   // Save |data| to |path| in an atomic manner (see the class comment above).
   // Blocks and writes data on the current thread.
-  static bool WriteFileAtomically(const FilePath& path,
-                                  const std::string& data);
+  static bool WriteFileAtomically(const FilePath& path, StringPiece data);
 
   // Initialize the writer.
   // |path| is the name of file to write.
@@ -63,11 +63,11 @@
   // execute file I/O operations.
   // All non-const methods, ctor and dtor must be called on the same thread.
   ImportantFileWriter(const FilePath& path,
-                      const scoped_refptr<SequencedTaskRunner>& task_runner);
+                      scoped_refptr<SequencedTaskRunner> task_runner);
 
   // Same as above, but with a custom commit interval.
   ImportantFileWriter(const FilePath& path,
-                      const scoped_refptr<SequencedTaskRunner>& task_runner,
+                      scoped_refptr<SequencedTaskRunner> task_runner,
                       TimeDelta interval);
 
   // You have to ensure that there are no pending writes at the moment
@@ -82,7 +82,7 @@
 
   // Save |data| to target filename. Does not block. If there is a pending write
   // scheduled by ScheduleWrite(), it is cancelled.
-  void WriteNow(scoped_ptr<std::string> data);
+  void WriteNow(std::unique_ptr<std::string> data);
 
   // Schedule a save to target filename. Data will be serialized and saved
   // to disk after the commit interval. If another ScheduleWrite is issued
diff --git a/base/files/important_file_writer_unittest.cc b/base/files/important_file_writer_unittest.cc
index 28e6001..ba1d4d3 100644
--- a/base/files/important_file_writer_unittest.cc
+++ b/base/files/important_file_writer_unittest.cc
@@ -12,10 +12,11 @@
 #include "base/location.h"
 #include "base/logging.h"
 #include "base/macros.h"
+#include "base/memory/ptr_util.h"
 #include "base/run_loop.h"
 #include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -103,7 +104,7 @@
   ImportantFileWriter writer(file_, ThreadTaskRunnerHandle::Get());
   EXPECT_FALSE(PathExists(writer.path()));
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
-  writer.WriteNow(make_scoped_ptr(new std::string("foo")));
+  writer.WriteNow(WrapUnique(new std::string("foo")));
   RunLoop().RunUntilIdle();
 
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
@@ -116,7 +117,7 @@
   EXPECT_FALSE(PathExists(writer.path()));
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
   successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
-  writer.WriteNow(make_scoped_ptr(new std::string("foo")));
+  writer.WriteNow(WrapUnique(new std::string("foo")));
   RunLoop().RunUntilIdle();
 
   // Confirm that the observer is invoked.
@@ -127,7 +128,7 @@
   // Confirm that re-installing the observer works for another write.
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
   successful_write_observer_.ObserveNextSuccessfulWrite(&writer);
-  writer.WriteNow(make_scoped_ptr(new std::string("bar")));
+  writer.WriteNow(WrapUnique(new std::string("bar")));
   RunLoop().RunUntilIdle();
 
   EXPECT_TRUE(successful_write_observer_.GetAndResetObservationState());
@@ -137,7 +138,7 @@
   // Confirm that writing again without re-installing the observer doesn't
   // result in a notification.
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
-  writer.WriteNow(make_scoped_ptr(new std::string("baz")));
+  writer.WriteNow(WrapUnique(new std::string("baz")));
   RunLoop().RunUntilIdle();
 
   EXPECT_FALSE(successful_write_observer_.GetAndResetObservationState());
diff --git a/base/files/memory_mapped_file.cc b/base/files/memory_mapped_file.cc
index 0fd9d67..67890d6 100644
--- a/base/files/memory_mapped_file.cc
+++ b/base/files/memory_mapped_file.cc
@@ -30,18 +30,30 @@
 }
 
 #if !defined(OS_NACL)
-bool MemoryMappedFile::Initialize(const FilePath& file_name) {
+bool MemoryMappedFile::Initialize(const FilePath& file_name, Access access) {
   if (IsValid())
     return false;
 
-  file_.Initialize(file_name, File::FLAG_OPEN | File::FLAG_READ);
+  uint32_t flags = 0;
+  switch (access) {
+    case READ_ONLY:
+      flags = File::FLAG_OPEN | File::FLAG_READ;
+      break;
+    case READ_WRITE:
+      flags = File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE;
+      break;
+    case READ_WRITE_EXTEND:
+      // Can't open with "extend" because no maximum size is known.
+      NOTREACHED();
+  }
+  file_.Initialize(file_name, flags);
 
   if (!file_.IsValid()) {
     DLOG(ERROR) << "Couldn't open " << file_name.AsUTF8Unsafe();
     return false;
   }
 
-  if (!MapFileRegionToMemory(Region::kWholeFile)) {
+  if (!MapFileRegionToMemory(Region::kWholeFile, access)) {
     CloseHandles();
     return false;
   }
@@ -49,11 +61,32 @@
   return true;
 }
 
-bool MemoryMappedFile::Initialize(File file) {
-  return Initialize(std::move(file), Region::kWholeFile);
+bool MemoryMappedFile::Initialize(File file, Access access) {
+  DCHECK_NE(READ_WRITE_EXTEND, access);
+  return Initialize(std::move(file), Region::kWholeFile, access);
 }
 
-bool MemoryMappedFile::Initialize(File file, const Region& region) {
+bool MemoryMappedFile::Initialize(File file,
+                                  const Region& region,
+                                  Access access) {
+  switch (access) {
+    case READ_WRITE_EXTEND:
+      // Ensure that the extended size is within limits of File.
+      if (region.size > std::numeric_limits<int64_t>::max() - region.offset) {
+        DLOG(ERROR) << "Region bounds exceed maximum for base::File.";
+        return false;
+      }
+      // Fall through.
+    case READ_ONLY:
+    case READ_WRITE:
+      // Ensure that the region values are valid.
+      if (region.offset < 0 || region.size < 0) {
+        DLOG(ERROR) << "Region bounds are not valid.";
+        return false;
+      }
+      break;
+  }
+
   if (IsValid())
     return false;
 
@@ -64,7 +97,7 @@
 
   file_ = std::move(file);
 
-  if (!MapFileRegionToMemory(region)) {
+  if (!MapFileRegionToMemory(region, access)) {
     CloseHandles();
     return false;
   }
diff --git a/base/files/memory_mapped_file.h b/base/files/memory_mapped_file.h
index 6362e76..cad99f6 100644
--- a/base/files/memory_mapped_file.h
+++ b/base/files/memory_mapped_file.h
@@ -23,6 +23,29 @@
 
 class BASE_EXPORT MemoryMappedFile {
  public:
+  enum Access {
+    // Mapping a file into memory effectively allows for file I/O on any thread.
+    // The accessing thread could be paused while data from the file is paged
+    // into memory. Worse, a corrupted filesystem could cause a SEGV within the
+    // program instead of just an I/O error.
+    READ_ONLY,
+
+    // This provides read/write access to a file and must be used with care of
+    // the additional subtleties involved in doing so. Though the OS will do
+    // the writing of data on its own time, too many dirty pages can cause
+    // the OS to pause the thread while it writes them out. The pause can
+    // be as much as 1s on some systems.
+    READ_WRITE,
+
+    // This provides read/write access but with the ability to write beyond
+    // the end of the existing file up to a maximum size specified as the
+    // "region". Depending on the OS, the file may or may not be immediately
+    // extended to the maximum size though it won't be loaded in RAM until
+    // needed. Note, however, that the maximum size will still be reserved
+    // in the process address space.
+    READ_WRITE_EXTEND,
+  };
+
   // The default constructor sets all members to invalid/null values.
   MemoryMappedFile();
   ~MemoryMappedFile();
@@ -41,27 +64,37 @@
     int64_t size;
   };
 
-  // Opens an existing file and maps it into memory. Access is restricted to
-  // read only. If this object already points to a valid memory mapped file
-  // then this method will fail and return false. If it cannot open the file,
-  // the file does not exist, or the memory mapping fails, it will return false.
-  // Later we may want to allow the user to specify access.
-  bool Initialize(const FilePath& file_name);
+  // Opens an existing file and maps it into memory. |access| can be read-only
+  // or read/write but not read/write+extend. If this object already points
+  // to a valid memory mapped file then this method will fail and return
+  // false. If it cannot open the file, the file does not exist, or the
+  // memory mapping fails, it will return false.
+  bool Initialize(const FilePath& file_name, Access access);
+  bool Initialize(const FilePath& file_name) {
+    return Initialize(file_name, READ_ONLY);
+  }
 
-  // As above, but works with an already-opened file. MemoryMappedFile takes
-  // ownership of |file| and closes it when done.
-  bool Initialize(File file);
+  // As above, but works with an already-opened file. |access| can be read-only
+  // or read/write but not read/write+extend. MemoryMappedFile takes ownership
+  // of |file| and closes it when done. |file| must have been opened with
+  // permissions suitable for |access|. If the memory mapping fails, it will
+  // return false.
+  bool Initialize(File file, Access access);
+  bool Initialize(File file) {
+    return Initialize(std::move(file), READ_ONLY);
+  }
 
-  // As above, but works with a region of an already-opened file.
-  bool Initialize(File file, const Region& region);
-
-#if defined(OS_WIN)
-  // Opens an existing file and maps it as an image section. Please refer to
-  // the Initialize function above for additional information.
-  bool InitializeAsImageSection(const FilePath& file_name);
-#endif  // OS_WIN
+  // As above, but works with a region of an already-opened file. All forms of
+  // |access| are allowed. If READ_WRITE_EXTEND is specified then |region|
+  // provides the maximum size of the file. If the memory mapping fails, it
+  // returns false.
+  bool Initialize(File file, const Region& region, Access access);
+  bool Initialize(File file, const Region& region) {
+    return Initialize(std::move(file), region, READ_ONLY);
+  }
 
   const uint8_t* data() const { return data_; }
+  uint8_t* data() { return data_; }
   size_t length() const { return length_; }
 
   // Is file_ a valid file handle that points to an open, memory mapped file?
@@ -82,7 +115,7 @@
 
   // Map the file to memory, set data_ to that memory address. Return true on
   // success, false on any kind of failure. This is a helper for Initialize().
-  bool MapFileRegionToMemory(const Region& region);
+  bool MapFileRegionToMemory(const Region& region, Access access);
 
   // Closes all open handles.
   void CloseHandles();
@@ -93,7 +126,6 @@
 
 #if defined(OS_WIN)
   win::ScopedHandle file_mapping_;
-  bool image_;  // Map as an image.
 #endif
 
   DISALLOW_COPY_AND_ASSIGN(MemoryMappedFile);
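The new Access enum is threaded through every Initialize() overload while the old read-only signatures remain as inline forwards. A small caller sketch exercising the read/write path and the new non-const data() accessor:

    #include "base/files/file_path.h"
    #include "base/files/memory_mapped_file.h"

    bool ZeroFirstByte(const base::FilePath& path) {
      base::MemoryMappedFile mapped;
      // READ_WRITE maps the existing file in place; it never extends it.
      if (!mapped.Initialize(path, base::MemoryMappedFile::READ_WRITE))
        return false;
      if (mapped.length() == 0)
        return false;
      mapped.data()[0] = 0;  // Possible now that a mutable data() exists.
      return true;
    }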
diff --git a/base/files/memory_mapped_file_posix.cc b/base/files/memory_mapped_file_posix.cc
index 1067fdc..4899cf0 100644
--- a/base/files/memory_mapped_file_posix.cc
+++ b/base/files/memory_mapped_file_posix.cc
@@ -21,7 +21,8 @@
 
 #if !defined(OS_NACL)
 bool MemoryMappedFile::MapFileRegionToMemory(
-    const MemoryMappedFile::Region& region) {
+    const MemoryMappedFile::Region& region,
+    Access access) {
   ThreadRestrictions::AssertIOAllowed();
 
   off_t map_start = 0;
@@ -65,7 +66,23 @@
     length_ = static_cast<size_t>(region.size);
   }
 
-  data_ = static_cast<uint8_t*>(mmap(NULL, map_size, PROT_READ, MAP_SHARED,
+  int flags = 0;
+  switch (access) {
+    case READ_ONLY:
+      flags |= PROT_READ;
+      break;
+    case READ_WRITE:
+      flags |= PROT_READ | PROT_WRITE;
+      break;
+    case READ_WRITE_EXTEND:
+      // POSIX won't auto-extend the file when it is written so it must first
+      // be explicitly extended to the maximum size. Zeros will fill the new
+      // space.
+      file_.SetLength(std::max(file_.GetLength(), region.offset + region.size));
+      flags |= PROT_READ | PROT_WRITE;
+      break;
+  }
+  data_ = static_cast<uint8_t*>(mmap(NULL, map_size, flags, MAP_SHARED,
                                      file_.GetPlatformFile(), map_start));
   if (data_ == MAP_FAILED) {
     DPLOG(ERROR) << "mmap " << file_.GetPlatformFile();
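For READ_WRITE_EXTEND the POSIX implementation grows the file before mapping it, because writing past end-of-file through a MAP_SHARED mapping faults instead of extending the file. A self-contained sketch of the same extend-then-map sequence using raw POSIX calls rather than the base::File wrappers:

    #include <cstddef>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    void* MapForExtend(const char* path, size_t max_size) {
      int fd = open(path, O_RDWR);
      if (fd < 0)
        return nullptr;
      // Extend first; the new tail reads back as zeros, mirroring SetLength().
      if (ftruncate(fd, static_cast<off_t>(max_size)) != 0) {
        close(fd);
        return nullptr;
      }
      void* mem =
          mmap(nullptr, max_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      close(fd);  // The mapping stays valid after the descriptor is closed.
      return mem == MAP_FAILED ? nullptr : mem;
    }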
diff --git a/base/files/scoped_file.h b/base/files/scoped_file.h
index 106f6ad..68c0415 100644
--- a/base/files/scoped_file.h
+++ b/base/files/scoped_file.h
@@ -7,9 +7,10 @@
 
 #include <stdio.h>
 
+#include <memory>
+
 #include "base/base_export.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/scoped_generic.h"
 #include "build/build_config.h"
 
@@ -54,7 +55,7 @@
 #endif
 
 // Automatically closes |FILE*|s.
-typedef scoped_ptr<FILE, internal::ScopedFILECloser> ScopedFILE;
+typedef std::unique_ptr<FILE, internal::ScopedFILECloser> ScopedFILE;
 
 }  // namespace base
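ScopedFILE is now a std::unique_ptr with a custom deleter rather than a scoped_ptr. A self-contained version of the same idiom, with a local FileCloser standing in for base::internal::ScopedFILECloser:

    #include <cstdio>
    #include <memory>

    struct FileCloser {
      void operator()(std::FILE* file) const {
        if (file)
          std::fclose(file);
      }
    };
    using ScopedFile = std::unique_ptr<std::FILE, FileCloser>;

    int main() {
      ScopedFile file(std::fopen("/tmp/scoped_file_demo.txt", "w"));
      if (file)
        std::fputs("hello\n", file.get());
      return 0;  // fclose() runs when |file| goes out of scope.
    }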
 
diff --git a/base/guid.cc b/base/guid.cc
index 99b037b..5714073 100644
--- a/base/guid.cc
+++ b/base/guid.cc
@@ -5,12 +5,21 @@
 #include "base/guid.h"
 
 #include <stddef.h>
+#include <stdint.h>
 
+#include "base/rand_util.h"
 #include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
 
 namespace base {
 
-bool IsValidGUID(const std::string& guid) {
+namespace {
+
+bool IsLowerHexDigit(char c) {
+  return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f');
+}
+
+bool IsValidGUIDInternal(const base::StringPiece& guid, bool strict) {
   const size_t kGUIDLength = 36U;
   if (guid.length() != kGUIDLength)
     return false;
@@ -21,7 +30,7 @@
       if (current != '-')
         return false;
     } else {
-      if (!IsHexDigit(current))
+      if ((strict && !IsLowerHexDigit(current)) || !IsHexDigit(current))
         return false;
     }
   }
@@ -29,4 +38,42 @@
   return true;
 }
 
+}  // namespace
+
+std::string GenerateGUID() {
+  uint64_t sixteen_bytes[2] = {base::RandUint64(), base::RandUint64()};
+
+  // Set the GUID to version 4 as described in RFC 4122, section 4.4.
+  // The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+  // where y is one of [8, 9, A, B].
+
+  // Clear the version bits and set the version to 4:
+  sixteen_bytes[0] &= 0xffffffffffff0fffULL;
+  sixteen_bytes[0] |= 0x0000000000004000ULL;
+
+  // Set the two most significant bits (bits 6 and 7) of the
+  // clock_seq_hi_and_reserved to zero and one, respectively:
+  sixteen_bytes[1] &= 0x3fffffffffffffffULL;
+  sixteen_bytes[1] |= 0x8000000000000000ULL;
+
+  return RandomDataToGUIDString(sixteen_bytes);
+}
+
+bool IsValidGUID(const base::StringPiece& guid) {
+  return IsValidGUIDInternal(guid, false /* strict */);
+}
+
+bool IsValidGUIDOutputString(const base::StringPiece& guid) {
+  return IsValidGUIDInternal(guid, true /* strict */);
+}
+
+std::string RandomDataToGUIDString(const uint64_t bytes[2]) {
+  return StringPrintf("%08x-%04x-%04x-%04x-%012llx",
+                      static_cast<unsigned int>(bytes[0] >> 32),
+                      static_cast<unsigned int>((bytes[0] >> 16) & 0x0000ffff),
+                      static_cast<unsigned int>(bytes[0] & 0x0000ffff),
+                      static_cast<unsigned int>(bytes[1] >> 48),
+                      bytes[1] & 0x0000ffffffffffffULL);
+}
+
 }  // namespace base
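GenerateGUID() now lives in the shared guid.cc and formats in lower case. A worked example of the version and variant masking with fixed input in place of base::RandUint64(), so the effect of each mask is visible:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t bytes[2] = {0x0123456789ABCDEFULL, 0xFEDCBA9876543210ULL};

      // Force version 4: clear the version nibble, then set it to 4.
      bytes[0] &= 0xffffffffffff0fffULL;
      bytes[0] |= 0x0000000000004000ULL;
      // Force the RFC 4122 variant: the top two clock_seq bits become "10".
      bytes[1] &= 0x3fffffffffffffffULL;
      bytes[1] |= 0x8000000000000000ULL;

      // Prints 01234567-89ab-4def-bedc-ba9876543210; the version nibble is 4
      // and the variant nibble is b, i.e. one of [8, 9, a, b].
      std::printf("%08x-%04x-%04x-%04x-%012" PRIx64 "\n",
                  static_cast<unsigned int>(bytes[0] >> 32),
                  static_cast<unsigned int>((bytes[0] >> 16) & 0xffff),
                  static_cast<unsigned int>(bytes[0] & 0xffff),
                  static_cast<unsigned int>(bytes[1] >> 48),
                  bytes[1] & 0x0000ffffffffffffULL);
      return 0;
    }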
diff --git a/base/guid.h b/base/guid.h
index c0a06f8..29c24ea 100644
--- a/base/guid.h
+++ b/base/guid.h
@@ -10,23 +10,33 @@
 #include <string>
 
 #include "base/base_export.h"
+#include "base/strings/string_piece.h"
 #include "build/build_config.h"
 
 namespace base {
 
-// Generate a 128-bit random GUID of the form: "%08X-%04X-%04X-%04X-%012llX".
+// Generate a 128-bit (pseudo) random GUID in the form of version 4 as described
+// in RFC 4122, section 4.4.
+// The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
+// where y is one of [8, 9, A, B].
+// The hexadecimal values "a" through "f" are output as lower case characters.
 // If GUID generation fails an empty string is returned.
-// The POSIX implementation uses pseudo random number generation to create
-// the GUID.  The Windows implementation uses system services.
 BASE_EXPORT std::string GenerateGUID();
 
-// Returns true if the input string conforms to the GUID format.
-BASE_EXPORT bool IsValidGUID(const std::string& guid);
+// Returns true if the input string conforms to the version 4 GUID format.
+// Note that this does NOT check if the hexadecimal values "a" through "f"
+// are in lower case characters, as the version 4 RFC says that on input they
+// are case insensitive. (Use IsValidGUIDOutputString to check whether the
+// given string is a valid output string.)
+BASE_EXPORT bool IsValidGUID(const base::StringPiece& guid);
 
-#if defined(OS_POSIX)
+// Returns true if the input string is a valid version 4 GUID output string.
+// This also checks if the hexadecimal values "a" through "f" are in lower
+// case characters.
+BASE_EXPORT bool IsValidGUIDOutputString(const base::StringPiece& guid);
+
 // For unit testing purposes only.  Do not use outside of tests.
 BASE_EXPORT std::string RandomDataToGUIDString(const uint64_t bytes[2]);
-#endif
 
 }  // namespace base
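The comments above distinguish the lenient IsValidGUID() check from the strict IsValidGUIDOutputString() check. A short usage sketch; CheckGuidForms() is a hypothetical helper:

    #include <string>

    #include "base/guid.h"
    #include "base/logging.h"

    void CheckGuidForms() {
      // Upper-case hex passes the lenient check but fails the strict one,
      // which only accepts GenerateGUID()-style lower-case output.
      const char kUpper[] = "01234567-89AB-4DEF-BEDC-BA9876543210";
      DCHECK(base::IsValidGUID(kUpper));
      DCHECK(!base::IsValidGUIDOutputString(kUpper));

      std::string generated = base::GenerateGUID();
      DCHECK(base::IsValidGUIDOutputString(generated));
    }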
 
diff --git a/base/guid_posix.cc b/base/guid_posix.cc
deleted file mode 100644
index ec1ca51..0000000
--- a/base/guid_posix.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/guid.h"
-
-#include <stdint.h>
-
-#include "base/rand_util.h"
-#include "base/strings/stringprintf.h"
-
-namespace base {
-
-std::string GenerateGUID() {
-  uint64_t sixteen_bytes[2] = {base::RandUint64(), base::RandUint64()};
-
-  // Set the GUID to version 4 as described in RFC 4122, section 4.4.
-  // The format of GUID version 4 must be xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx,
-  // where y is one of [8, 9, A, B].
-
-  // Clear the version bits and set the version to 4:
-  sixteen_bytes[0] &= 0xffffffffffff0fffULL;
-  sixteen_bytes[0] |= 0x0000000000004000ULL;
-
-  // Set the two most significant bits (bits 6 and 7) of the
-  // clock_seq_hi_and_reserved to zero and one, respectively:
-  sixteen_bytes[1] &= 0x3fffffffffffffffULL;
-  sixteen_bytes[1] |= 0x8000000000000000ULL;
-
-  return RandomDataToGUIDString(sixteen_bytes);
-}
-
-// TODO(cmasone): Once we're comfortable this works, migrate Windows code to
-// use this as well.
-std::string RandomDataToGUIDString(const uint64_t bytes[2]) {
-  return StringPrintf("%08X-%04X-%04X-%04X-%012llX",
-                      static_cast<unsigned int>(bytes[0] >> 32),
-                      static_cast<unsigned int>((bytes[0] >> 16) & 0x0000ffff),
-                      static_cast<unsigned int>(bytes[0] & 0x0000ffff),
-                      static_cast<unsigned int>(bytes[1] >> 48),
-                      bytes[1] & 0x0000ffffffffffffULL);
-}
-
-}  // namespace base
diff --git a/base/guid_unittest.cc b/base/guid_unittest.cc
index b6d976d..70dad67 100644
--- a/base/guid_unittest.cc
+++ b/base/guid_unittest.cc
@@ -14,8 +14,6 @@
 
 namespace base {
 
-#if defined(OS_POSIX)
-
 namespace {
 
 bool IsGUIDv4(const std::string& guid) {
@@ -37,15 +35,15 @@
 TEST(GUIDTest, GUIDGeneratesCorrectly) {
   uint64_t bytes[] = {0x0123456789ABCDEFULL, 0xFEDCBA9876543210ULL};
   std::string clientid = RandomDataToGUIDString(bytes);
-  EXPECT_EQ("01234567-89AB-CDEF-FEDC-BA9876543210", clientid);
+  EXPECT_EQ("01234567-89ab-cdef-fedc-ba9876543210", clientid);
 }
-#endif
 
 TEST(GUIDTest, GUIDCorrectlyFormatted) {
   const int kIterations = 10;
   for (int it = 0; it < kIterations; ++it) {
     std::string guid = GenerateGUID();
     EXPECT_TRUE(IsValidGUID(guid));
+    EXPECT_TRUE(IsValidGUIDOutputString(guid));
     EXPECT_TRUE(IsValidGUID(ToLowerASCII(guid)));
     EXPECT_TRUE(IsValidGUID(ToUpperASCII(guid)));
   }
@@ -59,10 +57,8 @@
     EXPECT_EQ(36U, guid1.length());
     EXPECT_EQ(36U, guid2.length());
     EXPECT_NE(guid1, guid2);
-#if defined(OS_POSIX)
     EXPECT_TRUE(IsGUIDv4(guid1));
     EXPECT_TRUE(IsGUIDv4(guid2));
-#endif
   }
 }
 
diff --git a/base/hash.cc b/base/hash.cc
index 4274772..4dfd0d0 100644
--- a/base/hash.cc
+++ b/base/hash.cc
@@ -8,7 +8,7 @@
 
 namespace base {
 
-uint32_t SuperFastHash(const char* data, int len) {
+uint32_t SuperFastHash(const char* data, size_t len) {
   std::hash<std::string> hash_fn;
   return hash_fn(std::string(data, len));
 }
diff --git a/base/hash.h b/base/hash.h
index 97e251c..7c0fba6 100644
--- a/base/hash.h
+++ b/base/hash.h
@@ -18,16 +18,12 @@
 namespace base {
 
 // WARNING: This hash function should not be used for any cryptographic purpose.
-BASE_EXPORT uint32_t SuperFastHash(const char* data, int len);
+BASE_EXPORT uint32_t SuperFastHash(const char* data, size_t length);
 
 // Computes a hash of a memory buffer |data| of a given |length|.
 // WARNING: This hash function should not be used for any cryptographic purpose.
 inline uint32_t Hash(const char* data, size_t length) {
-  if (length > static_cast<size_t>(std::numeric_limits<int>::max())) {
-    NOTREACHED();
-    return 0;
-  }
-  return SuperFastHash(data, static_cast<int>(length));
+  return SuperFastHash(data, length);
 }
 
 // Computes a hash of a string |str|.
diff --git a/base/json/json_file_value_serializer.cc b/base/json/json_file_value_serializer.cc
index 516f876..1a9b7a2 100644
--- a/base/json/json_file_value_serializer.cc
+++ b/base/json/json_file_value_serializer.cc
@@ -101,7 +101,7 @@
   }
 }
 
-scoped_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
+std::unique_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
     int* error_code,
     std::string* error_str) {
   std::string json_string;
diff --git a/base/json/json_file_value_serializer.h b/base/json/json_file_value_serializer.h
index f6b4e5f..67d2342 100644
--- a/base/json/json_file_value_serializer.h
+++ b/base/json/json_file_value_serializer.h
@@ -60,8 +60,8 @@
   // If |error_message| is non-null, it will be filled in with a formatted
   // error message including the location of the error if appropriate.
   // The caller takes ownership of the returned value.
-  scoped_ptr<base::Value> Deserialize(int* error_code,
-                                      std::string* error_message) override;
+  std::unique_ptr<base::Value> Deserialize(int* error_code,
+                                           std::string* error_message) override;
 
   // This enum is designed to safely overlap with JSONReader::JsonParseError.
   enum JsonFileError {
diff --git a/base/json/json_parser.cc b/base/json/json_parser.cc
index fbd4da4..c1bcf4a 100644
--- a/base/json/json_parser.cc
+++ b/base/json/json_parser.cc
@@ -5,10 +5,11 @@
 #include "base/json/json_parser.h"
 
 #include <cmath>
+#include <utility>
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
@@ -27,16 +28,19 @@
 
 const int32_t kExtendedASCIIStart = 0x80;
 
-// This and the class below are used to own the JSON input string for when
-// string tokens are stored as StringPiece instead of std::string. This
-// optimization avoids about 2/3rds of string memory copies. The constructor
-// takes ownership of the input string. The real root value is Swap()ed into
-// the new instance.
+// DictionaryHiddenRootValue and ListHiddenRootValue are used in conjunction
+// with JSONStringValue as an optimization for reducing the number of string
+// copies. When this optimization is active, the parser uses a hidden root to
+// keep the original JSON input string live and creates JSONStringValue children
+// holding StringPiece references to the input string, avoiding about 2/3rds of
+// string memory copies. The real root value is Swap()ed into the new instance.
 class DictionaryHiddenRootValue : public DictionaryValue {
  public:
-  DictionaryHiddenRootValue(std::string* json, Value* root) : json_(json) {
+  DictionaryHiddenRootValue(std::unique_ptr<std::string> json,
+                            std::unique_ptr<Value> root)
+      : json_(std::move(json)) {
     DCHECK(root->IsType(Value::TYPE_DICTIONARY));
-    DictionaryValue::Swap(static_cast<DictionaryValue*>(root));
+    DictionaryValue::Swap(static_cast<DictionaryValue*>(root.get()));
   }
 
   void Swap(DictionaryValue* other) override {
@@ -44,7 +48,7 @@
 
     // First deep copy to convert JSONStringValue to std::string and swap that
     // copy with |other|, which contains the new contents of |this|.
-    scoped_ptr<DictionaryValue> copy(DeepCopy());
+    std::unique_ptr<DictionaryValue> copy(CreateDeepCopy());
     copy->Swap(other);
 
     // Then erase the contents of the current dictionary and swap in the
@@ -58,7 +62,7 @@
   // the method below.
 
   bool RemoveWithoutPathExpansion(const std::string& key,
-                                  scoped_ptr<Value>* out) override {
+                                  std::unique_ptr<Value>* out) override {
     // If the caller won't take ownership of the removed value, just call up.
     if (!out)
       return DictionaryValue::RemoveWithoutPathExpansion(key, out);
@@ -67,26 +71,28 @@
 
     // Otherwise, remove the value while it's still "owned" by this and copy it
     // to convert any JSONStringValues to std::string.
-    scoped_ptr<Value> out_owned;
+    std::unique_ptr<Value> out_owned;
     if (!DictionaryValue::RemoveWithoutPathExpansion(key, &out_owned))
       return false;
 
-    out->reset(out_owned->DeepCopy());
+    *out = out_owned->CreateDeepCopy();
 
     return true;
   }
 
  private:
-  scoped_ptr<std::string> json_;
+  std::unique_ptr<std::string> json_;
 
   DISALLOW_COPY_AND_ASSIGN(DictionaryHiddenRootValue);
 };
 
 class ListHiddenRootValue : public ListValue {
  public:
-  ListHiddenRootValue(std::string* json, Value* root) : json_(json) {
+  ListHiddenRootValue(std::unique_ptr<std::string> json,
+                      std::unique_ptr<Value> root)
+      : json_(std::move(json)) {
     DCHECK(root->IsType(Value::TYPE_LIST));
-    ListValue::Swap(static_cast<ListValue*>(root));
+    ListValue::Swap(static_cast<ListValue*>(root.get()));
   }
 
   void Swap(ListValue* other) override {
@@ -94,7 +100,7 @@
 
     // First deep copy to convert JSONStringValue to std::string and swap that
     // copy with |other|, which contains the new contents of |this|.
-    scoped_ptr<ListValue> copy(DeepCopy());
+    std::unique_ptr<ListValue> copy(CreateDeepCopy());
     copy->Swap(other);
 
     // Then erase the contents of the current list and swap in the new contents,
@@ -104,7 +110,7 @@
     ListValue::Swap(copy.get());
   }
 
-  bool Remove(size_t index, scoped_ptr<Value>* out) override {
+  bool Remove(size_t index, std::unique_ptr<Value>* out) override {
     // If the caller won't take ownership of the removed value, just call up.
     if (!out)
       return ListValue::Remove(index, out);
@@ -113,17 +119,17 @@
 
     // Otherwise, remove the value while it's still "owned" by this and copy it
     // to convert any JSONStringValues to std::string.
-    scoped_ptr<Value> out_owned;
+    std::unique_ptr<Value> out_owned;
     if (!ListValue::Remove(index, &out_owned))
       return false;
 
-    out->reset(out_owned->DeepCopy());
+    *out = out_owned->CreateDeepCopy();
 
     return true;
   }
 
  private:
-  scoped_ptr<std::string> json_;
+  std::unique_ptr<std::string> json_;
 
   DISALLOW_COPY_AND_ASSIGN(ListHiddenRootValue);
 };
@@ -133,10 +139,8 @@
 // otherwise the referenced string will not be guaranteed to outlive it.
 class JSONStringValue : public Value {
  public:
-  explicit JSONStringValue(const StringPiece& piece)
-      : Value(TYPE_STRING),
-        string_piece_(piece) {
-  }
+  explicit JSONStringValue(StringPiece piece)
+      : Value(TYPE_STRING), string_piece_(piece) {}
 
   // Overridden from Value:
   bool GetAsString(std::string* out_value) const override {
@@ -203,13 +207,13 @@
 JSONParser::~JSONParser() {
 }
 
-Value* JSONParser::Parse(const StringPiece& input) {
-  scoped_ptr<std::string> input_copy;
+std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
+  std::unique_ptr<std::string> input_copy;
   // If the children of a JSON root can be detached, then hidden roots cannot
   // be used, so do not bother copying the input because StringPiece will not
   // be used anywhere.
   if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
-    input_copy.reset(new std::string(input.as_string()));
+    input_copy = WrapUnique(new std::string(input.as_string()));
     start_pos_ = input_copy->data();
   } else {
     start_pos_ = input.data();
@@ -235,15 +239,15 @@
   }
 
   // Parse the first and any nested tokens.
-  scoped_ptr<Value> root(ParseNextToken());
-  if (!root.get())
-    return NULL;
+  std::unique_ptr<Value> root(ParseNextToken());
+  if (!root)
+    return nullptr;
 
   // Make sure the input stream is at an end.
   if (GetNextToken() != T_END_OF_INPUT) {
     if (!CanConsume(1) || (NextChar() && GetNextToken() != T_END_OF_INPUT)) {
       ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 1);
-      return NULL;
+      return nullptr;
     }
   }
 
@@ -251,19 +255,21 @@
   // hidden root.
   if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
     if (root->IsType(Value::TYPE_DICTIONARY)) {
-      return new DictionaryHiddenRootValue(input_copy.release(), root.get());
+      return WrapUnique(new DictionaryHiddenRootValue(std::move(input_copy),
+                                                      std::move(root)));
     } else if (root->IsType(Value::TYPE_LIST)) {
-      return new ListHiddenRootValue(input_copy.release(), root.get());
+      return WrapUnique(
+          new ListHiddenRootValue(std::move(input_copy), std::move(root)));
     } else if (root->IsType(Value::TYPE_STRING)) {
       // A string type could be a JSONStringValue, but because there's no
       // corresponding HiddenRootValue, the memory will be lost. Deep copy to
       // preserve it.
-      return root->DeepCopy();
+      return root->CreateDeepCopy();
     }
   }
 
   // All other values can be returned directly.
-  return root.release();
+  return root;
 }
 
 JSONReader::JsonParseError JSONParser::error_code() const {
@@ -309,7 +315,7 @@
 
 void JSONParser::StringBuilder::Append(const char& c) {
   DCHECK_GE(c, 0);
-  DCHECK_LT(c, 128);
+  DCHECK_LT(static_cast<unsigned char>(c), 128);
 
   if (string_)
     string_->push_back(c);
@@ -499,7 +505,7 @@
     return NULL;
   }
 
-  scoped_ptr<DictionaryValue> dict(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> dict(new DictionaryValue);
 
   NextChar();
   Token token = GetNextToken();
@@ -563,7 +569,7 @@
     return NULL;
   }
 
-  scoped_ptr<ListValue> list(new ListValue);
+  std::unique_ptr<ListValue> list(new ListValue);
 
   NextChar();
   Token token = GetNextToken();
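The reworked comment in json_parser.cc describes the hidden-root optimization: string tokens hold StringPiece views into the original JSON text, so the root value must own that text for the views to stay valid. A stripped-down sketch of the ownership shape, with hypothetical LeafValue and HiddenRoot types standing in for JSONStringValue and the hidden-root classes:

    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    #include "base/strings/string_piece.h"

    struct LeafValue {
      base::StringPiece text;  // Points into the hidden root's buffer.
    };

    struct HiddenRoot {
      // Owns the input so every LeafValue view stays valid for as long as
      // the parse tree does.
      std::unique_ptr<std::string> json;
      std::vector<LeafValue> leaves;
    };

    HiddenRoot Parse(std::unique_ptr<std::string> json) {
      HiddenRoot root;
      root.leaves.push_back({base::StringPiece(*json)});  // Zero-copy view.
      root.json = std::move(json);  // The string's buffer itself never moves.
      return root;
    }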
diff --git a/base/json/json_parser.h b/base/json/json_parser.h
index fc04594..5bdec58 100644
--- a/base/json/json_parser.h
+++ b/base/json/json_parser.h
@@ -8,6 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
@@ -50,7 +51,7 @@
 
   // Parses the input string according to the set options and returns the
   // result as a Value owned by the caller.
-  Value* Parse(const StringPiece& input);
+  std::unique_ptr<Value> Parse(StringPiece input);
 
   // Returns the error code.
   JSONReader::JsonParseError error_code() const;
@@ -133,7 +134,7 @@
     size_t length_;
 
     // The copied string representation. NULL until Convert() is called.
-    // Strong. scoped_ptr<T> has too much of an overhead here.
+    // Strong. std::unique_ptr<T> has too much of an overhead here.
     std::string* string_;
   };
 
diff --git a/base/json/json_parser_unittest.cc b/base/json/json_parser_unittest.cc
index da86b33..30255ca 100644
--- a/base/json/json_parser_unittest.cc
+++ b/base/json/json_parser_unittest.cc
@@ -6,8 +6,9 @@
 
 #include <stddef.h>
 
+#include <memory>
+
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -34,7 +35,7 @@
 
 TEST_F(JSONParserTest, NextChar) {
   std::string input("Hello world");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
 
   EXPECT_EQ('H', *parser->pos_);
   for (size_t i = 1; i < input.length(); ++i) {
@@ -45,8 +46,8 @@
 
 TEST_F(JSONParserTest, ConsumeString) {
   std::string input("\"test\",|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeString());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeString());
   EXPECT_EQ('"', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -59,8 +60,8 @@
 
 TEST_F(JSONParserTest, ConsumeList) {
   std::string input("[true, false],|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeList());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeList());
   EXPECT_EQ(']', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -73,8 +74,8 @@
 
 TEST_F(JSONParserTest, ConsumeDictionary) {
   std::string input("{\"abc\":\"def\"},|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeDictionary());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeDictionary());
   EXPECT_EQ('}', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -90,8 +91,8 @@
 TEST_F(JSONParserTest, ConsumeLiterals) {
   // Literal |true|.
   std::string input("true,|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeLiteral());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeLiteral());
   EXPECT_EQ('e', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -128,8 +129,8 @@
 TEST_F(JSONParserTest, ConsumeNumbers) {
   // Integer.
   std::string input("1234,|");
-  scoped_ptr<JSONParser> parser(NewTestParser(input));
-  scoped_ptr<Value> value(parser->ConsumeNumber());
+  std::unique_ptr<JSONParser> parser(NewTestParser(input));
+  std::unique_ptr<Value> value(parser->ConsumeNumber());
   EXPECT_EQ('4', *parser->pos_);
 
   TestLastThree(parser.get());
@@ -205,7 +206,7 @@
   // Error strings should not be modified in case of success.
   std::string error_message;
   int error_code = 0;
-  scoped_ptr<Value> root = JSONReader::ReadAndReturnError(
+  std::unique_ptr<Value> root = JSONReader::ReadAndReturnError(
       "[42]", JSON_PARSE_RFC, &error_code, &error_message);
   EXPECT_TRUE(error_message.empty());
   EXPECT_EQ(0, error_code);
@@ -309,7 +310,7 @@
       "[\"😇\",[],[],[],{\"google:suggesttype\":[]}]";
   std::string error_message;
   int error_code = 0;
-  scoped_ptr<Value> root = JSONReader::ReadAndReturnError(
+  std::unique_ptr<Value> root = JSONReader::ReadAndReturnError(
       kUtf8Data, JSON_PARSE_RFC, &error_code, &error_message);
   EXPECT_TRUE(root.get()) << error_message;
 }
diff --git a/base/json/json_reader.cc b/base/json/json_reader.cc
index 3ab5f75..4ff7496 100644
--- a/base/json/json_reader.cc
+++ b/base/json/json_reader.cc
@@ -43,27 +43,28 @@
 }
 
 // static
-scoped_ptr<Value> JSONReader::Read(const StringPiece& json) {
+std::unique_ptr<Value> JSONReader::Read(StringPiece json) {
   internal::JSONParser parser(JSON_PARSE_RFC);
-  return make_scoped_ptr(parser.Parse(json));
+  return parser.Parse(json);
 }
 
 // static
-scoped_ptr<Value> JSONReader::Read(const StringPiece& json, int options) {
+std::unique_ptr<Value> JSONReader::Read(StringPiece json, int options) {
   internal::JSONParser parser(options);
-  return make_scoped_ptr(parser.Parse(json));
+  return parser.Parse(json);
 }
 
 
 // static
-scoped_ptr<Value> JSONReader::ReadAndReturnError(const StringPiece& json,
-                                                 int options,
-                                                 int* error_code_out,
-                                                 std::string* error_msg_out,
-                                                 int* error_line_out,
-                                                 int* error_column_out) {
+std::unique_ptr<Value> JSONReader::ReadAndReturnError(
+    const StringPiece& json,
+    int options,
+    int* error_code_out,
+    std::string* error_msg_out,
+    int* error_line_out,
+    int* error_column_out) {
   internal::JSONParser parser(options);
-  scoped_ptr<Value> root(parser.Parse(json));
+  std::unique_ptr<Value> root(parser.Parse(json));
   if (!root) {
     if (error_code_out)
       *error_code_out = parser.error_code();
@@ -105,8 +106,8 @@
   }
 }
 
-scoped_ptr<Value> JSONReader::ReadToValue(const std::string& json) {
-  return make_scoped_ptr(parser_->Parse(json));
+std::unique_ptr<Value> JSONReader::ReadToValue(StringPiece json) {
+  return parser_->Parse(json);
 }
 
 JSONReader::JsonParseError JSONReader::error_code() const {
diff --git a/base/json/json_reader.h b/base/json/json_reader.h
index c6bcb52..f647724 100644
--- a/base/json/json_reader.h
+++ b/base/json/json_reader.h
@@ -28,10 +28,10 @@
 #ifndef BASE_JSON_JSON_READER_H_
 #define BASE_JSON_JSON_READER_H_
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 
 namespace base {
@@ -93,30 +93,31 @@
 
   // Reads and parses |json|, returning a Value. The caller owns the returned
   // instance. If |json| is not a properly formed JSON string, returns NULL.
-  static scoped_ptr<Value> Read(const StringPiece& json);
+  static std::unique_ptr<Value> Read(StringPiece json);
 
   // Reads and parses |json|, returning a Value owned by the caller. The
   // parser respects the given |options|. If the input is not properly formed,
   // returns NULL.
-  static scoped_ptr<Value> Read(const StringPiece& json, int options);
+  static std::unique_ptr<Value> Read(StringPiece json, int options);
 
   // Reads and parses |json| like Read(). |error_code_out| and |error_msg_out|
   // are optional. If specified and NULL is returned, they will be populated
   // an error code and a formatted error message (including error location if
   // appropriate). Otherwise, they will be unmodified.
-  static scoped_ptr<Value> ReadAndReturnError(const StringPiece& json,
-                                              int options,  // JSONParserOptions
-                                              int* error_code_out,
-                                              std::string* error_msg_out,
-                                              int* error_line_out = nullptr,
-                                              int* error_column_out = nullptr);
+  static std::unique_ptr<Value> ReadAndReturnError(
+      const StringPiece& json,
+      int options,  // JSONParserOptions
+      int* error_code_out,
+      std::string* error_msg_out,
+      int* error_line_out = nullptr,
+      int* error_column_out = nullptr);
 
   // Converts a JSON parse error code into a human readable message.
   // Returns an empty string if error_code is JSON_NO_ERROR.
   static std::string ErrorCodeToString(JsonParseError error_code);
 
   // Parses an input string into a Value that is owned by the caller.
-  scoped_ptr<Value> ReadToValue(const std::string& json);
+  std::unique_ptr<Value> ReadToValue(StringPiece json);
 
   // Returns the error code if the last call to ReadToValue() failed.
   // Returns JSON_NO_ERROR otherwise.
@@ -127,7 +128,7 @@
   std::string GetErrorMessage() const;
 
  private:
-  scoped_ptr<internal::JSONParser> parser_;
+  std::unique_ptr<internal::JSONParser> parser_;
 };
 
 }  // namespace base
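After the json_reader.h changes, Read() accepts a StringPiece and hands back a std::unique_ptr<Value> directly instead of a raw pointer the caller had to wrap. A minimal caller sketch against the new signatures:

    #include <memory>
    #include <string>

    #include "base/json/json_reader.h"
    #include "base/values.h"

    bool ReadGreeting(std::string* out) {
      std::unique_ptr<base::Value> root =
          base::JSONReader::Read("{\"greeting\": \"hello\"}");
      base::DictionaryValue* dict = nullptr;
      if (!root || !root->GetAsDictionary(&dict))
        return false;
      return dict->GetString("greeting", out);
    }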
diff --git a/base/json/json_reader_unittest.cc b/base/json/json_reader_unittest.cc
index 45c04d8..c54dafa 100644
--- a/base/json/json_reader_unittest.cc
+++ b/base/json/json_reader_unittest.cc
@@ -14,7 +14,6 @@
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/values.h"
@@ -25,7 +24,7 @@
 
 TEST(JSONReaderTest, Reading) {
   // some whitespace checking
-  scoped_ptr<Value> root = JSONReader().ReadToValue("   null   ");
+  std::unique_ptr<Value> root = JSONReader().ReadToValue("   null   ");
   ASSERT_TRUE(root.get());
   EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
 
@@ -254,7 +253,7 @@
   EXPECT_EQ(3U, list->GetSize());
 
   // Test with trailing comma.  Should be parsed the same as above.
-  scoped_ptr<Value> root2 =
+  std::unique_ptr<Value> root2 =
       JSONReader::Read("[true, false, null, ]", JSON_ALLOW_TRAILING_COMMAS);
   EXPECT_TRUE(root->Equals(root2.get()));
 
@@ -557,7 +556,7 @@
       path.Append(FILE_PATH_LITERAL("bom_feff.json")), &input));
 
   JSONReader reader;
-  scoped_ptr<Value> root(reader.ReadToValue(input));
+  std::unique_ptr<Value> root(reader.ReadToValue(input));
   ASSERT_TRUE(root.get()) << reader.GetErrorMessage();
   EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
 }
@@ -566,15 +565,15 @@
 // Tests that the root of a JSON object can be deleted safely while its
 // children outlive it.
 TEST(JSONReaderTest, StringOptimizations) {
-  scoped_ptr<Value> dict_literal_0;
-  scoped_ptr<Value> dict_literal_1;
-  scoped_ptr<Value> dict_string_0;
-  scoped_ptr<Value> dict_string_1;
-  scoped_ptr<Value> list_value_0;
-  scoped_ptr<Value> list_value_1;
+  std::unique_ptr<Value> dict_literal_0;
+  std::unique_ptr<Value> dict_literal_1;
+  std::unique_ptr<Value> dict_string_0;
+  std::unique_ptr<Value> dict_string_1;
+  std::unique_ptr<Value> list_value_0;
+  std::unique_ptr<Value> list_value_1;
 
   {
-    scoped_ptr<Value> root = JSONReader::Read(
+    std::unique_ptr<Value> root = JSONReader::Read(
         "{"
         "  \"test\": {"
         "    \"foo\": true,"
diff --git a/base/json/json_string_value_serializer.cc b/base/json/json_string_value_serializer.cc
index af7e010..cd786db 100644
--- a/base/json/json_string_value_serializer.cc
+++ b/base/json/json_string_value_serializer.cc
@@ -48,7 +48,7 @@
 
 JSONStringValueDeserializer::~JSONStringValueDeserializer() {}
 
-scoped_ptr<Value> JSONStringValueDeserializer::Deserialize(
+std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
     int* error_code,
     std::string* error_str) {
   return base::JSONReader::ReadAndReturnError(
diff --git a/base/json/json_string_value_serializer.h b/base/json/json_string_value_serializer.h
index 2459f48..a97da23 100644
--- a/base/json/json_string_value_serializer.h
+++ b/base/json/json_string_value_serializer.h
@@ -59,8 +59,8 @@
   // If |error_message| is non-null, it will be filled in with a formatted
   // error message including the location of the error if appropriate.
   // The caller takes ownership of the returned value.
-  scoped_ptr<base::Value> Deserialize(int* error_code,
-                                      std::string* error_message) override;
+  std::unique_ptr<base::Value> Deserialize(int* error_code,
+                                           std::string* error_message) override;
 
   void set_allow_trailing_comma(bool new_value) {
     allow_trailing_comma_ = new_value;
diff --git a/base/json/json_value_converter.h b/base/json/json_value_converter.h
index a1e0d5b..4cca034 100644
--- a/base/json/json_value_converter.h
+++ b/base/json/json_value_converter.h
@@ -7,13 +7,13 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/base_export.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/scoped_vector.h"
 #include "base/stl_util.h"
 #include "base/strings/string16.h"
@@ -131,7 +131,7 @@
 
  private:
   FieldType StructType::* field_pointer_;
-  scoped_ptr<ValueConverter<FieldType> > value_converter_;
+  std::unique_ptr<ValueConverter<FieldType>> value_converter_;
   DISALLOW_COPY_AND_ASSIGN(FieldConverter);
 };
 
@@ -266,7 +266,7 @@
       if (!list->Get(i, &element))
         continue;
 
-      scoped_ptr<Element> e(new Element);
+      std::unique_ptr<Element> e(new Element);
       if (basic_converter_.Convert(*element, e.get())) {
         field->push_back(e.release());
       } else {
@@ -300,7 +300,7 @@
       if (!list->Get(i, &element))
         continue;
 
-      scoped_ptr<NestedType> nested(new NestedType);
+      std::unique_ptr<NestedType> nested(new NestedType);
       if (converter_.Convert(*element, nested.get())) {
         field->push_back(nested.release());
       } else {
@@ -337,7 +337,7 @@
       if (!list->Get(i, &element))
         continue;
 
-      scoped_ptr<NestedType> nested(new NestedType);
+      std::unique_ptr<NestedType> nested(new NestedType);
       if ((*convert_func_)(element, nested.get())) {
         field->push_back(nested.release());
       } else {
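A sketch of the registration pattern this header supports, unchanged by the uprev apart from the smart-pointer type of the stored converters (the struct and field names below are made up for illustration):

  #include <memory>
  #include <string>

  #include "base/json/json_reader.h"
  #include "base/json/json_value_converter.h"

  struct SimpleMessage {
    int foo = 0;
    std::string bar;

    static void RegisterJSONConverter(
        base::JSONValueConverter<SimpleMessage>* converter) {
      converter->RegisterIntField("foo", &SimpleMessage::foo);
      converter->RegisterStringField("bar", &SimpleMessage::bar);
    }
  };

  bool Parse(const std::string& json, SimpleMessage* out) {
    std::unique_ptr<base::Value> value = base::JSONReader::Read(json);
    if (!value)
      return false;
    base::JSONValueConverter<SimpleMessage> converter;
    return converter.Convert(*value, out);
  }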
diff --git a/base/json/json_value_converter_unittest.cc b/base/json/json_value_converter_unittest.cc
index 9038610..56ade24 100644
--- a/base/json/json_value_converter_unittest.cc
+++ b/base/json/json_value_converter_unittest.cc
@@ -4,11 +4,11 @@
 
 #include "base/json/json_value_converter.h"
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/scoped_vector.h"
 #include "base/strings/string_piece.h"
 #include "base/values.h"
@@ -106,7 +106,7 @@
       "  \"ints\": [1, 2]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   EXPECT_TRUE(converter.Convert(*value.get(), &message));
@@ -148,7 +148,7 @@
       "  }]\n"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   NestedMessage message;
   base::JSONValueConverter<NestedMessage> converter;
   EXPECT_TRUE(converter.Convert(*value.get(), &message));
@@ -190,7 +190,7 @@
       "  \"ints\": [1, 2]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   EXPECT_FALSE(converter.Convert(*value.get(), &message));
@@ -206,7 +206,7 @@
       "  \"ints\": [1, 2]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   // Convert() still succeeds even if the input doesn't have "bar" field.
@@ -229,7 +229,7 @@
       "  \"ints\": [1, 2]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   EXPECT_FALSE(converter.Convert(*value.get(), &message));
@@ -246,7 +246,7 @@
       "  \"ints\": [1, false]"
       "}\n";
 
-  scoped_ptr<Value> value = base::JSONReader::Read(normal_data);
+  std::unique_ptr<Value> value = base::JSONReader::Read(normal_data);
   SimpleMessage message;
   base::JSONValueConverter<SimpleMessage> converter;
   EXPECT_FALSE(converter.Convert(*value.get(), &message));
diff --git a/base/json/json_value_serializer_unittest.cc b/base/json/json_value_serializer_unittest.cc
index 7f2ae10..0c079b7 100644
--- a/base/json/json_value_serializer_unittest.cc
+++ b/base/json/json_value_serializer_unittest.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <memory>
 #include <string>
 
 #include "base/files/file_util.h"
@@ -10,7 +11,6 @@
 #include "base/json/json_reader.h"
 #include "base/json/json_string_value_serializer.h"
 #include "base/json/json_writer.h"
-#include "base/memory/scoped_ptr.h"
 #if !defined(__ANDROID__) && !defined(__ANDROID_HOST__)
 #include "base/path_service.h"
 #endif
@@ -78,7 +78,7 @@
 }
 
 void ValidateJsonList(const std::string& json) {
-  scoped_ptr<Value> root = JSONReader::Read(json);
+  std::unique_ptr<Value> root = JSONReader::Read(json);
   ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
   ListValue* list = static_cast<ListValue*>(root.get());
   ASSERT_EQ(1U, list->GetSize());
@@ -96,7 +96,7 @@
 
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
@@ -115,7 +115,7 @@
 
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
@@ -132,7 +132,7 @@
 
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       str_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_FALSE(value.get());
   ASSERT_NE(0, error_code);
@@ -160,7 +160,7 @@
 
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       file_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_TRUE(value.get());
   ASSERT_EQ(0, error_code);
@@ -185,7 +185,7 @@
   // This must fail without the proper flag.
   int error_code = 0;
   std::string error_message;
-  scoped_ptr<Value> value =
+  std::unique_ptr<Value> value =
       file_deserializer.Deserialize(&error_code, &error_message);
   ASSERT_FALSE(value.get());
   ASSERT_NE(0, error_code);
@@ -200,8 +200,8 @@
 }
 
 TEST(JSONValueDeserializerTest, AllowTrailingComma) {
-  scoped_ptr<Value> root;
-  scoped_ptr<Value> root_expected;
+  std::unique_ptr<Value> root;
+  std::unique_ptr<Value> root_expected;
   static const char kTestWithCommas[] = "{\"key\": [true,],}";
   static const char kTestNoCommas[] = "{\"key\": [true]}";
 
@@ -219,7 +219,7 @@
   static const char kOriginalSerialization[] =
     "{\"bool\":true,\"double\":3.14,\"int\":42,\"list\":[1,2],\"null\":null}";
   JSONStringValueDeserializer deserializer(kOriginalSerialization);
-  scoped_ptr<Value> root = deserializer.Deserialize(NULL, NULL);
+  std::unique_ptr<Value> root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(root.get());
   ASSERT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
 
@@ -329,7 +329,7 @@
 
   // escaped ascii text -> json
   JSONStringValueDeserializer deserializer(kExpected);
-  scoped_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
+  std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(deserial_root.get());
   DictionaryValue* dict_root =
       static_cast<DictionaryValue*>(deserial_root.get());
@@ -353,7 +353,7 @@
 
   // escaped ascii text -> json
   JSONStringValueDeserializer deserializer(kExpected);
-  scoped_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
+  std::unique_ptr<Value> deserial_root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(deserial_root.get());
   DictionaryValue* dict_root =
       static_cast<DictionaryValue*>(deserial_root.get());
@@ -380,7 +380,7 @@
   ValidateJsonList("[ 1 //// ,2\r\n ]");
 
   // It's ok to have a comment in a string.
-  scoped_ptr<Value> root = JSONReader::Read("[\"// ok\\n /* foo */ \"]");
+  std::unique_ptr<Value> root = JSONReader::Read("[\"// ok\\n /* foo */ \"]");
   ASSERT_TRUE(root.get() && root->IsType(Value::TYPE_LIST));
   ListValue* list = static_cast<ListValue*>(root.get());
   ASSERT_EQ(1U, list->GetSize());
@@ -416,7 +416,7 @@
   ASSERT_TRUE(PathExists(original_file_path));
 
   JSONFileValueDeserializer deserializer(original_file_path);
-  scoped_ptr<Value> root;
+  std::unique_ptr<Value> root;
   root = deserializer.Deserialize(NULL, NULL);
 
   ASSERT_TRUE(root.get());
@@ -464,7 +464,7 @@
   ASSERT_TRUE(PathExists(original_file_path));
 
   JSONFileValueDeserializer deserializer(original_file_path);
-  scoped_ptr<Value> root;
+  std::unique_ptr<Value> root;
   root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(root.get());
 
@@ -489,7 +489,7 @@
       FILE_PATH_LITERAL("serializer_test_nowhitespace.json"));
   ASSERT_TRUE(PathExists(source_file_path));
   JSONFileValueDeserializer deserializer(source_file_path);
-  scoped_ptr<Value> root;
+  std::unique_ptr<Value> root;
   root = deserializer.Deserialize(NULL, NULL);
   ASSERT_TRUE(root.get());
 }
diff --git a/base/json/json_writer_unittest.cc b/base/json/json_writer_unittest.cc
index a62b3ba..37ad268 100644
--- a/base/json/json_writer_unittest.cc
+++ b/base/json/json_writer_unittest.cc
@@ -3,6 +3,8 @@
 // found in the LICENSE file.
 
 #include "base/json/json_writer.h"
+
+#include "base/memory/ptr_util.h"
 #include "base/values.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -55,11 +57,11 @@
   // Writer unittests like empty list/dict nesting,
   // list list nesting, etc.
   DictionaryValue root_dict;
-  scoped_ptr<ListValue> list(new ListValue());
-  scoped_ptr<DictionaryValue> inner_dict(new DictionaryValue());
+  std::unique_ptr<ListValue> list(new ListValue());
+  std::unique_ptr<DictionaryValue> inner_dict(new DictionaryValue());
   inner_dict->SetInteger("inner int", 10);
   list->Append(std::move(inner_dict));
-  list->Append(make_scoped_ptr(new ListValue()));
+  list->Append(WrapUnique(new ListValue()));
   list->AppendBoolean(true);
   root_dict.Set("list", std::move(list));
 
@@ -91,7 +93,7 @@
   DictionaryValue period_dict;
   period_dict.SetIntegerWithoutPathExpansion("a.b", 3);
   period_dict.SetIntegerWithoutPathExpansion("c", 2);
-  scoped_ptr<DictionaryValue> period_dict2(new DictionaryValue());
+  std::unique_ptr<DictionaryValue> period_dict2(new DictionaryValue());
   period_dict2->SetIntegerWithoutPathExpansion("g.h.i.j", 1);
   period_dict.SetWithoutPathExpansion("d.e.f", std::move(period_dict2));
   EXPECT_TRUE(JSONWriter::Write(period_dict, &output_js));
@@ -109,7 +111,7 @@
 
   // Binary values should return errors unless suppressed via the
   // OPTIONS_OMIT_BINARY_VALUES flag.
-  scoped_ptr<Value> root(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
+  std::unique_ptr<Value> root(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
   EXPECT_FALSE(JSONWriter::Write(*root, &output_js));
   EXPECT_TRUE(JSONWriter::WriteWithOptions(
       *root, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
@@ -117,9 +119,9 @@
 
   ListValue binary_list;
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
-  binary_list.Append(make_scoped_ptr(new FundamentalValue(5)));
+  binary_list.Append(WrapUnique(new FundamentalValue(5)));
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
-  binary_list.Append(make_scoped_ptr(new FundamentalValue(2)));
+  binary_list.Append(WrapUnique(new FundamentalValue(2)));
   binary_list.Append(BinaryValue::CreateWithCopiedBuffer("asdf", 4));
   EXPECT_FALSE(JSONWriter::Write(binary_list, &output_js));
   EXPECT_TRUE(JSONWriter::WriteWithOptions(
@@ -128,13 +130,13 @@
 
   DictionaryValue binary_dict;
   binary_dict.Set(
-      "a", make_scoped_ptr(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+      "a", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
   binary_dict.SetInteger("b", 5);
   binary_dict.Set(
-      "c", make_scoped_ptr(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+      "c", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
   binary_dict.SetInteger("d", 2);
   binary_dict.Set(
-      "e", make_scoped_ptr(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
+      "e", WrapUnique(BinaryValue::CreateWithCopiedBuffer("asdf", 4)));
   EXPECT_FALSE(JSONWriter::Write(binary_dict, &output_js));
   EXPECT_TRUE(JSONWriter::WriteWithOptions(
       binary_dict, JSONWriter::OPTIONS_OMIT_BINARY_VALUES, &output_js));
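make_scoped_ptr() call sites in value-building code become base::WrapUnique(), as the test above shows; a small standalone sketch (the produced JSON is noted in a comment):

  #include <memory>
  #include <string>
  #include <utility>

  #include "base/json/json_writer.h"
  #include "base/memory/ptr_util.h"
  #include "base/values.h"

  std::string BuildJson() {
    base::DictionaryValue root;
    std::unique_ptr<base::ListValue> list(new base::ListValue());
    list->AppendInteger(42);
    list->Append(base::WrapUnique(new base::DictionaryValue()));
    root.Set("items", std::move(list));

    std::string out;
    base::JSONWriter::Write(root, &out);  // {"items":[42,{}]}
    return out;
  }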
diff --git a/base/lazy_instance.h b/base/lazy_instance.h
index 1a921e6..ac970c5 100644
--- a/base/lazy_instance.h
+++ b/base/lazy_instance.h
@@ -39,6 +39,7 @@
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
+#include "base/debug/leak_annotations.h"
 #include "base/logging.h"
 #include "base/memory/aligned_memory.h"
 #include "base/threading/thread_restrictions.h"
@@ -97,10 +98,10 @@
 #endif
 
   static Type* New(void* instance) {
+    ANNOTATE_SCOPED_MEMORY_LEAK;
     return DefaultLazyInstanceTraits<Type>::New(instance);
   }
-  static void Delete(Type* /* instance */) {
-  }
+  static void Delete(Type*) {}
 };
 
 // Our AtomicWord doubles as a spinlock, where a value of
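The leak annotation above matters for leaky lazy instances, which are deliberately never destroyed; a typical (illustrative) use:

  #include <map>
  #include <string>

  #include "base/lazy_instance.h"

  namespace {
  // Constructed on first Get() and intentionally leaked at process exit; the
  // ANNOTATE_SCOPED_MEMORY_LEAK added above keeps leak checkers quiet about it.
  base::LazyInstance<std::map<int, std::string>>::Leaky g_names =
      LAZY_INSTANCE_INITIALIZER;
  }  // namespace

  void RegisterName(int id, const std::string& name) {
    g_names.Get()[id] = name;
  }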
diff --git a/base/logging.cc b/base/logging.cc
index 1ebb84f..3b65962 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -30,8 +30,6 @@
 #elif defined(OS_POSIX)
 #if defined(OS_NACL)
 #include <sys/time.h>  // timespec doesn't seem to be in <time.h>
-#else
-#include <sys/syscall.h>
 #endif
 #include <time.h>
 #endif
@@ -51,7 +49,6 @@
 #endif
 
 #include <algorithm>
-#include <cassert>
 #include <cstring>
 #include <ctime>
 #include <iomanip>
@@ -63,7 +60,6 @@
 #include "base/debug/alias.h"
 #include "base/debug/debugger.h"
 #include "base/debug/stack_trace.h"
-#include "base/files/file_path.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/string_piece.h"
 #include "base/strings/string_util.h"
@@ -77,6 +73,10 @@
 #include "base/posix/safe_strerror.h"
 #endif
 
+#if !defined(OS_ANDROID)
+#include "base/files/file_path.h"
+#endif
+
 #if defined(OS_ANDROID) || defined(__ANDROID__)
 #include <android/log.h>
 #endif
@@ -209,8 +209,7 @@
     UnlockLogging();
   }
 
-  static void Init(LogLockingState lock_log,
-                   const PathChar* /* new_log_file */) {
+  static void Init(LogLockingState lock_log, const PathChar* /*new_log_file*/) {
     if (initialized)
       return;
     lock_log_file = lock_log;
@@ -461,6 +460,11 @@
 template std::string* MakeCheckOpString<std::string, std::string>(
     const std::string&, const std::string&, const char* name);
 
+template <>
+void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t&) {
+  (*os) << "nullptr";
+}
+
 #if !defined(NDEBUG)
 // Displays a message box to the user with the error message in it.
 // Used for fatal messages, where we close the app simultaneously.
@@ -769,8 +773,12 @@
     stream_ << base::PlatformThread::CurrentId() << ':';
   if (g_log_timestamp) {
     time_t t = time(nullptr);
+#if defined(__ANDROID__) || defined(ANDROID)
     struct tm local_time;
     memset(&local_time, 0, sizeof(local_time));
+#else
+    struct tm local_time = {0};
+#endif
 #ifdef _MSC_VER
     localtime_s(&local_time, &t);
 #else
diff --git a/base/logging.h b/base/logging.h
index 06f38f4..bebf526 100644
--- a/base/logging.h
+++ b/base/logging.h
@@ -459,7 +459,7 @@
 // boolean.
 class CheckOpResult {
  public:
-  // |message| must be null if and only if the check failed.
+  // |message| must be non-null if and only if the check failed.
   CheckOpResult(std::string* message) : message_(message) {}
   // Returns true if the check succeeded.
   operator bool() const { return !message_; }
@@ -477,22 +477,28 @@
 // We make sure CHECK et al. always evaluates their arguments, as
 // doing CHECK(FunctionWithSideEffect()) is a common idiom.
 
-#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && !defined(OS_ANDROID)
+#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
 
 // Make all CHECK functions discard their log strings to reduce code
-// bloat for official release builds (except Android).
+// bloat, and improve performance, for official release builds.
 
-// TODO(akalin): This would be more valuable if there were some way to
-// remove BreakDebugger() from the backtrace, perhaps by turning it
-// into a macro (like __debugbreak() on Windows).
+#if defined(COMPILER_GCC) || __clang__
+#define LOGGING_CRASH() __builtin_trap()
+#else
+#define LOGGING_CRASH() ((void)(*(volatile char*)0 = 0))
+#endif
+
+// This is not calling BreakDebugger since this is called frequently, and
+// calling an out-of-line function instead of a noreturn inline macro prevents
+// compiler optimizations.
 #define CHECK(condition)                                                \
-  !(condition) ? ::base::debug::BreakDebugger() : EAT_STREAM_PARAMETERS
+  !(condition) ? LOGGING_CRASH() : EAT_STREAM_PARAMETERS
 
 #define PCHECK(condition) CHECK(condition)
 
 #define CHECK_OP(name, op, val1, val2) CHECK((val1) op (val2))
 
-#else
+#else  // !(OFFICIAL_BUILD && NDEBUG)
 
 #if defined(_PREFAST_) && defined(OS_WIN)
 // Use __analysis_assume to tell the VC++ static analysis engine that
@@ -540,7 +546,19 @@
   else                                                                         \
     logging::LogMessage(__FILE__, __LINE__, true_if_passed.message()).stream()
 
-#endif
+#endif  // !(OFFICIAL_BUILD && NDEBUG)
+
+// This formats a value for a failing CHECK_XX statement.  Ordinarily,
+// it uses the definition for operator<<, with a few special cases below.
+template <typename T>
+inline void MakeCheckOpValueString(std::ostream* os, const T& v) {
+  (*os) << v;
+}
+
+// We need an explicit specialization for std::nullptr_t.
+template <>
+BASE_EXPORT void MakeCheckOpValueString(std::ostream* os,
+                                        const std::nullptr_t& p);
 
 // Build the error message string.  This is separate from the "Impl"
 // function template because it is not performance critical and so can
@@ -549,7 +567,11 @@
 template<class t1, class t2>
 std::string* MakeCheckOpString(const t1& v1, const t2& v2, const char* names) {
   std::ostringstream ss;
-  ss << names << " (" << v1 << " vs. " << v2 << ")";
+  ss << names << " (";
+  MakeCheckOpValueString(&ss, v1);
+  ss << " vs. ";
+  MakeCheckOpValueString(&ss, v2);
+  ss << ")";
   std::string* msg = new std::string(ss.str());
   return msg;
 }
@@ -601,7 +623,7 @@
 #define CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
 #define CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
 
-#if defined(NDEBUG)
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
 #define ENABLE_DLOG 0
 #else
 #define ENABLE_DLOG 1
@@ -743,9 +765,10 @@
 // for example:
 //   DCHECK_EQ(string("abc")[1], 'b');
 //
-// WARNING: These may not compile correctly if one of the arguments is a pointer
-// and the other is NULL. To work around this, simply static_cast NULL to the
-// type of the desired pointer.
+// WARNING: These don't compile correctly if one of the arguments is a pointer
+// and the other is NULL.  In new code, prefer nullptr instead.  To
+// work around this for C++98, simply static_cast NULL to the type of the
+// desired pointer.
 
 #define DCHECK_EQ(val1, val2) DCHECK_OP(EQ, ==, val1, val2)
 #define DCHECK_NE(val1, val2) DCHECK_OP(NE, !=, val1, val2)
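The new MakeCheckOpValueString() specialization exists because streaming std::nullptr_t into a std::ostream is ill-formed before C++17, so CHECK/DCHECK comparisons against nullptr could not build their failure message. An illustrative call site:

  #include "base/logging.h"

  void Validate(const void* p) {
    // Compiles now; on failure the right-hand side of the message is rendered
    // as "nullptr" by the specialization declared above.
    CHECK_NE(p, nullptr);
  }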
diff --git a/base/logging_unittest.cc b/base/logging_unittest.cc
index 22fb855..7254265 100644
--- a/base/logging_unittest.cc
+++ b/base/logging_unittest.cc
@@ -191,7 +191,7 @@
 #endif
 
 TEST_F(LoggingTest, DebugLoggingReleaseBehavior) {
-#if !defined(NDEBUG)
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
   int debug_only_variable = 1;
 #endif
   // These should avoid emitting references to |debug_only_variable|
@@ -226,7 +226,7 @@
   // Release build with real DCHECKS.
   SetLogAssertHandler(&LogSink);
   EXPECT_TRUE(DCHECK_IS_ON());
-  EXPECT_FALSE(DLOG_IS_ON(DCHECK));
+  EXPECT_TRUE(DLOG_IS_ON(DCHECK));
 #else
   // Debug build.
   SetLogAssertHandler(&LogSink);
@@ -241,6 +241,16 @@
   EXPECT_EQ(DCHECK_IS_ON() ? 2 : 0, log_sink_call_count);
   DCHECK_EQ(0, 1);
   EXPECT_EQ(DCHECK_IS_ON() ? 3 : 0, log_sink_call_count);
+
+  // Test DCHECK on std::nullptr_t
+  log_sink_call_count = 0;
+  const void* p_null = nullptr;
+  const void* p_not_null = &p_null;
+  DCHECK_EQ(p_null, nullptr);
+  DCHECK_EQ(nullptr, p_null);
+  DCHECK_NE(p_not_null, nullptr);
+  DCHECK_NE(nullptr, p_not_null);
+  EXPECT_EQ(0, log_sink_call_count);
 }
 
 TEST_F(LoggingTest, DcheckReleaseBehavior) {
diff --git a/base/mac/foundation_util.mm b/base/mac/foundation_util.mm
index 6ae5df3..d872fc3 100644
--- a/base/mac/foundation_util.mm
+++ b/base/mac/foundation_util.mm
@@ -18,6 +18,10 @@
 #include "build/build_config.h"
 
 #if !defined(OS_IOS)
+#import <AppKit/AppKit.h>
+#endif
+
+#if !defined(OS_IOS)
 extern "C" {
 CFTypeID SecACLGetTypeID();
 CFTypeID SecTrustedApplicationGetTypeID();
@@ -316,7 +320,7 @@
   DCHECK(!cf_val ||
          CTFontGetTypeID() == CFGetTypeID(cf_val) ||
          (_CFIsObjC(CTFontGetTypeID(), cf_val) &&
-          [ns_val isKindOfClass:NSClassFromString(@"NSFont")]));
+          [ns_val isKindOfClass:[NSFont class]]));
   return ns_val;
 }
 
@@ -324,7 +328,7 @@
   CTFontRef cf_val = reinterpret_cast<CTFontRef>(ns_val);
   DCHECK(!cf_val ||
          CTFontGetTypeID() == CFGetTypeID(cf_val) ||
-         [ns_val isKindOfClass:NSClassFromString(@"NSFont")]);
+         [ns_val isKindOfClass:[NSFont class]]);
   return cf_val;
 }
 #endif
@@ -388,7 +392,7 @@
     return NULL;
 
   id<NSObject> ns_val = reinterpret_cast<id>(const_cast<void*>(cf_val));
-  if ([ns_val isKindOfClass:NSClassFromString(@"NSFont")]) {
+  if ([ns_val isKindOfClass:[NSFont class]]) {
     return (CTFontRef)(cf_val);
   }
   return NULL;
diff --git a/base/mac/mac_util.h b/base/mac/mac_util.h
index 7772e88..c72c5f1 100644
--- a/base/mac/mac_util.h
+++ b/base/mac/mac_util.h
@@ -11,13 +11,6 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/logging.h"
-
-#if defined(__OBJC__)
-#import <Foundation/Foundation.h>
-#else  // __OBJC__
-class NSImage;
-#endif  // __OBJC__
 
 namespace base {
 
@@ -73,12 +66,6 @@
 BASE_EXPORT void SwitchFullScreenModes(FullScreenMode from_mode,
                                        FullScreenMode to_mode);
 
-// Set the visibility of the cursor.
-BASE_EXPORT void SetCursorVisibility(bool visible);
-
-// Activates the process with the given PID.
-BASE_EXPORT void ActivateProcess(pid_t pid);
-
 // Returns true if this process is in the foreground, meaning that it's the
 // frontmost process, the one whose menu bar is shown at the top of the main
 // display.
@@ -126,14 +113,6 @@
 // "OrLater" variants to those that check for a specific version, unless you
 // know for sure that you need to check for a specific version.
 
-// Snow Leopard is Mac OS X 10.6, Darwin 10.
-BASE_EXPORT bool IsOSSnowLeopard();
-
-// Lion is Mac OS X 10.7, Darwin 11.
-BASE_EXPORT bool IsOSLion();
-BASE_EXPORT bool IsOSLionOrEarlier();
-BASE_EXPORT bool IsOSLionOrLater();
-
 // Mountain Lion is Mac OS X 10.8, Darwin 12.
 BASE_EXPORT bool IsOSMountainLion();
 BASE_EXPORT bool IsOSMountainLionOrEarlier();
@@ -160,7 +139,6 @@
 
 // Inline functions that are redundant due to version ranges being mutually-
 // exclusive.
-inline bool IsOSLionOrEarlier() { return !IsOSMountainLionOrLater(); }
 inline bool IsOSMountainLionOrEarlier() { return !IsOSMavericksOrLater(); }
 inline bool IsOSMavericksOrEarlier() { return !IsOSYosemiteOrLater(); }
 inline bool IsOSYosemiteOrEarlier() { return !IsOSElCapitanOrLater(); }
@@ -170,19 +148,6 @@
 // constant-value inline functions. The MAC_OS_X_VERSION_MIN_REQUIRED macro
 // contains the value of the deployment target.
 
-#if defined(MAC_OS_X_VERSION_10_7) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_7
-#define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_7
-inline bool IsOSSnowLeopard() { return false; }
-inline bool IsOSLionOrLater() { return true; }
-#endif
-
-#if defined(MAC_OS_X_VERSION_10_7) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_7
-#define BASE_MAC_MAC_UTIL_H_INLINED_GT_10_7
-inline bool IsOSLion() { return false; }
-#endif
-
 #if defined(MAC_OS_X_VERSION_10_8) && \
     MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_8
 #define BASE_MAC_MAC_UTIL_H_INLINED_GE_10_8
diff --git a/base/mac/mach_port_broker.h b/base/mac/mach_port_broker.h
index ba08b6f..4554b6a 100644
--- a/base/mac/mach_port_broker.h
+++ b/base/mac/mach_port_broker.h
@@ -8,13 +8,13 @@
 #include <mach/mach.h>
 
 #include <map>
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/mac/dispatch_source_mach.h"
 #include "base/mac/scoped_mach_port.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/process/port_provider_mac.h"
 #include "base/process/process_handle.h"
 #include "base/synchronization/lock.h"
@@ -91,7 +91,7 @@
   base::mac::ScopedMachReceiveRight server_port_;
 
   // The dispatch source and queue on which Mach messages will be received.
-  scoped_ptr<base::DispatchSourceMach> dispatch_source_;
+  std::unique_ptr<base::DispatchSourceMach> dispatch_source_;
 
   // Stores mach info for every process in the broker.
   typedef std::map<base::ProcessHandle, mach_port_t> MachMap;
diff --git a/base/mac/scoped_authorizationref.h b/base/mac/scoped_authorizationref.h
index 39afa8c..03cde86 100644
--- a/base/mac/scoped_authorizationref.h
+++ b/base/mac/scoped_authorizationref.h
@@ -61,10 +61,9 @@
     authorization_ = temp;
   }
 
-  // ScopedAuthorizationRef::release() is like scoped_ptr<>::release.  It is
-  // NOT a wrapper for AuthorizationFree().  To force a
-  // ScopedAuthorizationRef object to call AuthorizationFree(), use
-  // ScopedAuthorizationRef::reset().
+  // ScopedAuthorizationRef::release() is like std::unique_ptr<>::release. It is
+  // NOT a wrapper for AuthorizationFree(). To force a ScopedAuthorizationRef
+  // object to call AuthorizationFree(), use ScopedAuthorizationRef::reset().
   AuthorizationRef release() WARN_UNUSED_RESULT {
     AuthorizationRef temp = authorization_;
     authorization_ = NULL;
diff --git a/base/mac/scoped_cftyperef.h b/base/mac/scoped_cftyperef.h
index 1be0fbe..ccbc5cf 100644
--- a/base/mac/scoped_cftyperef.h
+++ b/base/mac/scoped_cftyperef.h
@@ -11,10 +11,10 @@
 
 namespace base {
 
-// ScopedCFTypeRef<> is patterned after scoped_ptr<>, but maintains ownership
-// of a CoreFoundation object: any object that can be represented as a
-// CFTypeRef.  Style deviations here are solely for compatibility with
-// scoped_ptr<>'s interface, with which everyone is already familiar.
+// ScopedCFTypeRef<> is patterned after std::unique_ptr<>, but maintains
+// ownership of a CoreFoundation object: any object that can be represented
+// as a CFTypeRef.  Style deviations here are solely for compatibility with
+// std::unique_ptr<>'s interface, with which everyone is already familiar.
 //
 // By default, ScopedCFTypeRef<> takes ownership of an object (in the
 // constructor or in reset()) by taking over the caller's existing ownership
diff --git a/base/mac/scoped_launch_data.h b/base/mac/scoped_launch_data.h
index da62006..f4db330 100644
--- a/base/mac/scoped_launch_data.h
+++ b/base/mac/scoped_launch_data.h
@@ -21,7 +21,7 @@
 
 }  // namespace internal
 
-// Just like scoped_ptr<> but for launch_data_t.
+// Just like std::unique_ptr<> but for launch_data_t.
 using ScopedLaunchData =
     ScopedGeneric<launch_data_t, internal::ScopedLaunchDataTraits>;
 
diff --git a/base/mac/scoped_nsobject.h b/base/mac/scoped_nsobject.h
index 04c5877..4b26acf 100644
--- a/base/mac/scoped_nsobject.h
+++ b/base/mac/scoped_nsobject.h
@@ -19,10 +19,10 @@
 
 namespace base {
 
-// scoped_nsobject<> is patterned after scoped_ptr<>, but maintains ownership
-// of an NSObject subclass object.  Style deviations here are solely for
-// compatibility with scoped_ptr<>'s interface, with which everyone is already
-// familiar.
+// scoped_nsobject<> is patterned after std::unique_ptr<>, but maintains
+// ownership of an NSObject subclass object.  Style deviations here are solely
+// for compatibility with std::unique_ptr<>'s interface, with which everyone is
+// already familiar.
 //
 // scoped_nsobject<> takes ownership of an object (in the constructor or in
 // reset()) by taking over the caller's existing ownership claim.  The caller
diff --git a/base/mac/scoped_typeref.h b/base/mac/scoped_typeref.h
index 4211414..eed5afb 100644
--- a/base/mac/scoped_typeref.h
+++ b/base/mac/scoped_typeref.h
@@ -11,7 +11,7 @@
 
 namespace base {
 
-// ScopedTypeRef<> is patterned after scoped_ptr<>, but maintains a ownership
+// ScopedTypeRef<> is patterned after std::unique_ptr<>, but maintains ownership
 // of a reference to any type that is maintained by Retain and Release methods.
 //
 // The Traits structure must provide the Retain and Release methods for type T.
@@ -67,6 +67,20 @@
       object_ = Traits::Retain(object_);
   }
 
+  // Without this, passing a ScopedTypeRef<A,TraitsX> to construct a
+  // ScopedTypeRef<A,TraitsY> would automatically cast down to an A, and then
+  // ASSUME ownership of A, when a retain is what was needed.
+  template<typename OtherTraits>
+  ScopedTypeRef(const ScopedTypeRef<T, OtherTraits>& that_with_other_traits)
+      : object_(that_with_other_traits.get()) {
+    if (object_)
+      object_ = Traits::Retain(object_);
+  }
+
+  ScopedTypeRef(ScopedTypeRef<T, Traits>&& that) : object_(that.object_) {
+    that.object_ = Traits::InvalidValue();
+  }
+
   ~ScopedTypeRef() {
     if (object_)
       Traits::Release(object_);
@@ -117,7 +131,7 @@
     object_ = temp;
   }
 
-  // ScopedTypeRef<>::release() is like scoped_ptr<>::release.  It is NOT
+  // ScopedTypeRef<>::release() is like std::unique_ptr<>::release.  It is NOT
   // a wrapper for Release().  To force a ScopedTypeRef<> object to call
   // Release(), use ScopedTypeRef<>::reset().
   T release() WARN_UNUSED_RESULT {
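The new constructors above give ScopedTypeRef (and therefore ScopedCFTypeRef) a real move constructor and a retaining cross-traits copy; a Mac-only illustrative sketch:

  #include <CoreFoundation/CoreFoundation.h>

  #include <utility>

  #include "base/mac/scoped_cftyperef.h"

  void Example() {
    base::ScopedCFTypeRef<CFStringRef> a(CFStringCreateWithCString(
        kCFAllocatorDefault, "hello", kCFStringEncodingUTF8));
    // Move transfers ownership without touching the retain count; |a| is left
    // holding the traits' invalid value (NULL).
    base::ScopedCFTypeRef<CFStringRef> b(std::move(a));
  }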
diff --git a/base/macros.h b/base/macros.h
index f971da9..2a82537 100644
--- a/base/macros.h
+++ b/base/macros.h
@@ -68,7 +68,7 @@
 // really sure you don't want to do anything with the return value of a function
 // that has been marked WARN_UNUSED_RESULT, wrap it with this. Example:
 //
-//   scoped_ptr<MyType> my_var = ...;
+//   std::unique_ptr<MyType> my_var = ...;
 //   if (TakeOwnership(my_var.get()) == SUCCESS)
 //     ignore_result(my_var.release());
 //
diff --git a/base/md5_unittest.cc b/base/md5_unittest.cc
index 3926b66..b27efe9 100644
--- a/base/md5_unittest.cc
+++ b/base/md5_unittest.cc
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/md5.h"
+
 #include <string.h>
+
+#include <memory>
 #include <string>
 
-#include "base/memory/scoped_ptr.h"
-#include "base/md5.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -66,7 +68,7 @@
 
 TEST(MD5, MD5SumLongData) {
   const int length = 10 * 1024 * 1024 + 1;
-  scoped_ptr<char[]> data(new char[length]);
+  std::unique_ptr<char[]> data(new char[length]);
 
   for (int i = 0; i < length; ++i)
     data[i] = i & 0xFF;
@@ -108,7 +110,7 @@
   MD5Init(&ctx);
 
   const int length = 10 * 1024 * 1024 + 1;
-  scoped_ptr<char[]> data(new char[length]);
+  std::unique_ptr<char[]> data(new char[length]);
 
   for (int i = 0; i < length; ++i)
     data[i] = i & 0xFF;
diff --git a/base/memory/aligned_memory.h b/base/memory/aligned_memory.h
index bb7bd87..d829011 100644
--- a/base/memory/aligned_memory.h
+++ b/base/memory/aligned_memory.h
@@ -26,9 +26,9 @@
 //   // ... later, to release the memory:
 //   AlignedFree(my_array);
 //
-// Or using scoped_ptr:
+// Or using unique_ptr:
 //
-//   scoped_ptr<float, AlignedFreeDeleter> my_array(
+//   std::unique_ptr<float, AlignedFreeDeleter> my_array(
 //       static_cast<float*>(AlignedAlloc(size, alignment)));
 
 #ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
@@ -104,8 +104,8 @@
 #endif
 }
 
-// Deleter for use with scoped_ptr. E.g., use as
-//   scoped_ptr<Foo, base::AlignedFreeDeleter> foo;
+// Deleter for use with unique_ptr. E.g., use as
+//   std::unique_ptr<Foo, base::AlignedFreeDeleter> foo;
 struct AlignedFreeDeleter {
   inline void operator()(void* ptr) const {
     AlignedFree(ptr);
diff --git a/base/memory/aligned_memory_unittest.cc b/base/memory/aligned_memory_unittest.cc
index b89e341..abe0cf3 100644
--- a/base/memory/aligned_memory_unittest.cc
+++ b/base/memory/aligned_memory_unittest.cc
@@ -3,7 +3,9 @@
 // found in the LICENSE file.
 
 #include "base/memory/aligned_memory.h"
-#include "base/memory/scoped_ptr.h"
+
+#include <memory>
+
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -92,7 +94,7 @@
 }
 
 TEST(AlignedMemoryTest, ScopedDynamicAllocation) {
-  scoped_ptr<float, base::AlignedFreeDeleter> p(
+  std::unique_ptr<float, base::AlignedFreeDeleter> p(
       static_cast<float*>(base::AlignedAlloc(8, 8)));
   EXPECT_TRUE(p.get());
   EXPECT_ALIGNED(p.get(), 8);
diff --git a/base/memory/free_deleter.h b/base/memory/free_deleter.h
new file mode 100644
index 0000000..5604118
--- /dev/null
+++ b/base/memory/free_deleter.h
@@ -0,0 +1,25 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_FREE_DELETER_H_
+#define BASE_MEMORY_FREE_DELETER_H_
+
+#include <stdlib.h>
+
+namespace base {
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+//     static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+  inline void operator()(void* ptr) const {
+    free(ptr);
+  }
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_FREE_DELETER_H_
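FreeDeleter moves from scoped_ptr.h into this header; a typical use is adopting a malloc()-allocated pointer from a C API (strdup() here is just an example):

  #include <string.h>

  #include <memory>

  #include "base/memory/free_deleter.h"

  std::unique_ptr<char, base::FreeDeleter> CopyCString(const char* s) {
    // strdup() allocates with malloc(), so the matching cleanup is free(),
    // which FreeDeleter encodes in the pointer's type.
    return std::unique_ptr<char, base::FreeDeleter>(strdup(s));
  }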
diff --git a/base/memory/ptr_util.h b/base/memory/ptr_util.h
new file mode 100644
index 0000000..8747ac9
--- /dev/null
+++ b/base/memory/ptr_util.h
@@ -0,0 +1,74 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PTR_UTIL_H_
+#define BASE_MEMORY_PTR_UTIL_H_
+
+#include <memory>
+#include <utility>
+
+namespace base {
+
+// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
+// Note that std::unique_ptr<T> has very different semantics from
+// std::unique_ptr<T[]>: do not use this helper for array allocations.
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+  return std::unique_ptr<T>(ptr);
+}
+
+namespace internal {
+
+template <typename T>
+struct MakeUniqueResult {
+  using Scalar = std::unique_ptr<T>;
+};
+
+template <typename T>
+struct MakeUniqueResult<T[]> {
+  using Array = std::unique_ptr<T[]>;
+};
+
+template <typename T, size_t N>
+struct MakeUniqueResult<T[N]> {
+  using Invalid = void;
+};
+
+}  // namespace internal
+
+// Helper to construct an object wrapped in a std::unique_ptr. This is an
+// implementation of C++14's std::make_unique that can be used in Chrome.
+//
+// MakeUnique<T>(args) should be preferred over WrapUnique(new T(args)): bare
+// calls to `new` should be treated with scrutiny.
+//
+// Usage:
+//   // ptr is a std::unique_ptr<std::string>
+//   auto ptr = MakeUnique<std::string>("hello world!");
+//
+//   // arr is a std::unique_ptr<int[]>
+//   auto arr = MakeUnique<int[]>(5);
+
+// Overload for non-array types. Arguments are forwarded to T's constructor.
+template <typename T, typename... Args>
+typename internal::MakeUniqueResult<T>::Scalar MakeUnique(Args&&... args) {
+  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+// Overload for array types of unknown bound, e.g. T[]. The array is allocated
+// with `new T[n]()` and value-initialized: note that this is distinct from
+// `new T[n]`, which default-initializes.
+template <typename T>
+typename internal::MakeUniqueResult<T>::Array MakeUnique(size_t size) {
+  return std::unique_ptr<T>(new typename std::remove_extent<T>::type[size]());
+}
+
+// Overload to reject array types of known bound, e.g. T[n].
+template <typename T, typename... Args>
+typename internal::MakeUniqueResult<T>::Invalid MakeUnique(Args&&... args) =
+    delete;
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_PTR_UTIL_H_
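A short sketch of how the new helpers are meant to be used (the types and names here are illustrative):

  #include <memory>
  #include <string>
  #include <utility>

  #include "base/memory/ptr_util.h"

  struct Widget {
    Widget(int id, std::string name) : id(id), name(std::move(name)) {}
    int id;
    std::string name;
  };

  void Example() {
    // Preferred: no bare `new` at the call site.
    std::unique_ptr<Widget> w = base::MakeUnique<Widget>(1, "knob");

    // WrapUnique adopts a raw pointer you already have, e.g. from a factory
    // that has not yet been converted.
    std::unique_ptr<Widget> w2 = base::WrapUnique(new Widget(2, "dial"));

    // Array form: a value-initialized buffer of 16 ints.
    std::unique_ptr<int[]> buf = base::MakeUnique<int[]>(16);
    buf[0] = w->id + w2->id;
  }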
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index e739514..96231af 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -5,8 +5,11 @@
 #ifndef BASE_MEMORY_REF_COUNTED_H_
 #define BASE_MEMORY_REF_COUNTED_H_
 
+#include <stddef.h>
+
 #include <cassert>
 #include <iosfwd>
+#include <type_traits>
 
 #include "base/atomic_ref_count.h"
 #include "base/base_export.h"
@@ -283,7 +286,9 @@
   }
 
   // Copy conversion constructor.
-  template <typename U>
+  template <typename U,
+            typename = typename std::enable_if<
+                std::is_convertible<U*, T*>::value>::type>
   scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
     if (ptr_)
       AddRef(ptr_);
@@ -294,7 +299,9 @@
   scoped_refptr(scoped_refptr&& r) : ptr_(r.get()) { r.ptr_ = nullptr; }
 
   // Move conversion constructor.
-  template <typename U>
+  template <typename U,
+            typename = typename std::enable_if<
+                std::is_convertible<U*, T*>::value>::type>
   scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.get()) {
     r.ptr_ = nullptr;
   }
@@ -357,30 +364,7 @@
     swap(&r.ptr_);
   }
 
- private:
-  template <typename U> friend class scoped_refptr;
-
-  // Implement "Safe Bool Idiom"
-  // https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Safe_bool
-  //
-  // Allow scoped_refptr<T> to be used in boolean expressions such as
-  //   if (ref_ptr_instance)
-  // But do not become convertible to a real bool (which is dangerous).
-  //   Implementation requires:
-  //     typedef Testable
-  //     operator Testable() const
-  //     operator==
-  //     operator!=
-  //
-  // == and != operators must be declared explicitly or dissallowed, as
-  // otherwise "ptr1 == ptr2" will compile but do the wrong thing (i.e., convert
-  // to Testable and then do the comparison).
-  //
-  // C++11 provides for "explicit operator bool()", however it is currently
-  // banned due to MSVS2013. https://chromium-cpp.appspot.com/#core-blacklist
-  typedef T* scoped_refptr::*Testable;
- public:
-  operator Testable() const { return ptr_ ? &scoped_refptr::ptr_ : nullptr; }
+  explicit operator bool() const { return ptr_ != nullptr; }
 
   template <typename U>
   bool operator==(const scoped_refptr<U>& rhs) const {
@@ -401,6 +385,10 @@
   T* ptr_;
 
  private:
+  // Friend required for move constructors that set r.ptr_ to null.
+  template <typename U>
+  friend class scoped_refptr;
+
   // Non-inline helpers to allow:
   //     class Opaque;
   //     extern template class scoped_refptr<Opaque>;
@@ -436,6 +424,16 @@
   return lhs == rhs.get();
 }
 
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t) {
+  return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t, const scoped_refptr<T>& rhs) {
+  return !static_cast<bool>(rhs);
+}
+
 template <typename T, typename U>
 bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
   return !operator==(lhs, rhs);
@@ -447,6 +445,16 @@
 }
 
 template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+  return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+  return !operator==(null, rhs);
+}
+
+template <typename T>
 std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
   return out << p.get();
 }
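The safe-bool idiom is replaced by an explicit operator bool, and nullptr comparisons are now supported directly; an illustrative call site:

  #include <utility>

  #include "base/memory/ref_counted.h"

  class Foo : public base::RefCounted<Foo> {
   private:
    friend class base::RefCounted<Foo>;
    ~Foo() {}
  };

  void Example() {
    scoped_refptr<Foo> a(new Foo);        // refcount 1
    scoped_refptr<Foo> b = a;             // refcount 2
    scoped_refptr<Foo> c = std::move(a);  // refcount still 2; |a| is now null

    // explicit operator bool() works in boolean contexts; the new
    // operator==/!= overloads allow comparing against nullptr directly.
    if (c && a == nullptr)
      b = nullptr;                        // drops one reference
  }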
diff --git a/base/memory/ref_counted_delete_on_message_loop.h b/base/memory/ref_counted_delete_on_message_loop.h
index 84f80d8..de194e8 100644
--- a/base/memory/ref_counted_delete_on_message_loop.h
+++ b/base/memory/ref_counted_delete_on_message_loop.h
@@ -19,10 +19,8 @@
 // Sample usage:
 // class Foo : public RefCountedDeleteOnMessageLoop<Foo> {
 //
-//   Foo(const scoped_refptr<SingleThreadTaskRunner>& loop)
-//       : RefCountedDeleteOnMessageLoop<Foo>(loop) {
-//     ...
-//   }
+//   Foo(scoped_refptr<SingleThreadTaskRunner> loop)
+//       : RefCountedDeleteOnMessageLoop<Foo>(std::move(loop)) {}
 //   ...
 //  private:
 //   friend class RefCountedDeleteOnMessageLoop<Foo>;
@@ -40,8 +38,8 @@
   // MessageLoop on the current thread can be acquired by calling
   // MessageLoop::current()->task_runner().
   RefCountedDeleteOnMessageLoop(
-      const scoped_refptr<SingleThreadTaskRunner>& task_runner)
-      : task_runner_(task_runner) {
+      scoped_refptr<SingleThreadTaskRunner> task_runner)
+      : task_runner_(std::move(task_runner)) {
     DCHECK(task_runner_);
   }
 
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index 3f56b4a..7c4e07a 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -105,6 +105,22 @@
 int ScopedRefPtrCountDerived::constructor_count_ = 0;
 int ScopedRefPtrCountDerived::destructor_count_ = 0;
 
+class Other : public base::RefCounted<Other> {
+ private:
+  friend class base::RefCounted<Other>;
+
+  ~Other() {}
+};
+
+scoped_refptr<Other> Overloaded(scoped_refptr<Other> other) {
+  return other;
+}
+
+scoped_refptr<SelfAssign> Overloaded(scoped_refptr<SelfAssign> self_assign) {
+  return self_assign;
+}
+
+
 }  // end namespace
 
 TEST(RefCountedUnitTest, TestSelfAssignment) {
@@ -188,6 +204,16 @@
   EXPECT_NE(p2, p1);
 }
 
+TEST(RefCountedUnitTest, NullptrEquality) {
+  scoped_refptr<SelfAssign> ptr_to_an_instance(new SelfAssign);
+  scoped_refptr<SelfAssign> ptr_to_nullptr;
+
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+  EXPECT_EQ(nullptr, ptr_to_nullptr);
+  EXPECT_EQ(ptr_to_nullptr, nullptr);
+}
+
 TEST(RefCountedUnitTest, ConvertibleEquality) {
   scoped_refptr<Derived> p1(new Derived);
   scoped_refptr<SelfAssign> p2;
@@ -461,3 +487,21 @@
   EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
 }
 
+TEST(RefCountedUnitTest, TestOverloadResolutionCopy) {
+  scoped_refptr<Derived> derived(new Derived);
+  scoped_refptr<SelfAssign> expected(derived);
+  EXPECT_EQ(expected, Overloaded(derived));
+
+  scoped_refptr<Other> other(new Other);
+  EXPECT_EQ(other, Overloaded(other));
+}
+
+TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
+  scoped_refptr<Derived> derived(new Derived);
+  scoped_refptr<SelfAssign> expected(derived);
+  EXPECT_EQ(expected, Overloaded(std::move(derived)));
+
+  scoped_refptr<Other> other(new Other);
+  scoped_refptr<Other> other2(other);
+  EXPECT_EQ(other2, Overloaded(std::move(other)));
+}
diff --git a/base/memory/scoped_ptr.h b/base/memory/scoped_ptr.h
deleted file mode 100644
index 2d2c0ec..0000000
--- a/base/memory/scoped_ptr.h
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Scopers help you manage ownership of a pointer, helping you easily manage a
-// pointer within a scope, and automatically destroying the pointer at the end
-// of a scope.  There are two main classes you will use, which correspond to the
-// operators new/delete and new[]/delete[].
-//
-// Example usage (scoped_ptr<T>):
-//   {
-//     scoped_ptr<Foo> foo(new Foo("wee"));
-//   }  // foo goes out of scope, releasing the pointer with it.
-//
-//   {
-//     scoped_ptr<Foo> foo;          // No pointer managed.
-//     foo.reset(new Foo("wee"));    // Now a pointer is managed.
-//     foo.reset(new Foo("wee2"));   // Foo("wee") was destroyed.
-//     foo.reset(new Foo("wee3"));   // Foo("wee2") was destroyed.
-//     foo->Method();                // Foo::Method() called.
-//     foo.get()->Method();          // Foo::Method() called.
-//     SomeFunc(foo.release());      // SomeFunc takes ownership, foo no longer
-//                                   // manages a pointer.
-//     foo.reset(new Foo("wee4"));   // foo manages a pointer again.
-//     foo.reset();                  // Foo("wee4") destroyed, foo no longer
-//                                   // manages a pointer.
-//   }  // foo wasn't managing a pointer, so nothing was destroyed.
-//
-// Example usage (scoped_ptr<T[]>):
-//   {
-//     scoped_ptr<Foo[]> foo(new Foo[100]);
-//     foo.get()->Method();  // Foo::Method on the 0th element.
-//     foo[10].Method();     // Foo::Method on the 10th element.
-//   }
-//
-// Scopers are testable as booleans:
-//   {
-//     scoped_ptr<Foo> foo;
-//     if (!foo)
-//       foo.reset(new Foo());
-//     if (foo)
-//       LOG(INFO) << "This code is reached."
-//   }
-//
-// These scopers also implement part of the functionality of C++11 unique_ptr
-// in that they are "movable but not copyable."  You can use the scopers in
-// the parameter and return types of functions to signify ownership transfer
-// in to and out of a function.  When calling a function that has a scoper
-// as the argument type, it must be called with an rvalue of a scoper, which
-// can be created by using std::move(), or the result of another function that
-// generates a temporary; passing by copy will NOT work.  Here is an example
-// using scoped_ptr:
-//
-//   void TakesOwnership(scoped_ptr<Foo> arg) {
-//     // Do something with arg.
-//   }
-//   scoped_ptr<Foo> CreateFoo() {
-//     // No need for calling std::move() for returning a move-only value, or
-//     // when you already have an rvalue as we do here.
-//     return scoped_ptr<Foo>(new Foo("new"));
-//   }
-//   scoped_ptr<Foo> PassThru(scoped_ptr<Foo> arg) {
-//     return arg;
-//   }
-//
-//   {
-//     scoped_ptr<Foo> ptr(new Foo("yay"));  // ptr manages Foo("yay").
-//     TakesOwnership(std::move(ptr));       // ptr no longer owns Foo("yay").
-//     scoped_ptr<Foo> ptr2 = CreateFoo();   // ptr2 owns the return Foo.
-//     scoped_ptr<Foo> ptr3 =                // ptr3 now owns what was in ptr2.
-//         PassThru(std::move(ptr2));        // ptr2 is correspondingly nullptr.
-//   }
-//
-// Notice that if you do not call std::move() when returning from PassThru(), or
-// when invoking TakesOwnership(), the code will not compile because scopers
-// are not copyable; they only implement move semantics which require calling
-// the std::move() function to signify a destructive transfer of state.
-// CreateFoo() is different though because we are constructing a temporary on
-// the return line and thus can avoid needing to call std::move().
-//
-// The conversion move-constructor properly handles upcast in initialization,
-// i.e. you can use a scoped_ptr<Child> to initialize a scoped_ptr<Parent>:
-//
-//   scoped_ptr<Foo> foo(new Foo());
-//   scoped_ptr<FooParent> parent(std::move(foo));
-
-#ifndef BASE_MEMORY_SCOPED_PTR_H_
-#define BASE_MEMORY_SCOPED_PTR_H_
-
-// This is an implementation designed to match the anticipated future TR2
-// implementation of the scoped_ptr class.
-
-// TODO(dcheng): Clean up these headers, but there are likely lots of existing
-// IWYU violations.
-#include <stddef.h>
-#include <stdlib.h>
-
-#include <iosfwd>
-#include <memory>
-#include <type_traits>
-#include <utility>
-
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/move.h"
-#include "build/build_config.h"
-
-namespace base {
-
-// Function object which invokes 'free' on its parameter, which must be
-// a pointer. Can be used to store malloc-allocated pointers in scoped_ptr:
-//
-// scoped_ptr<int, base::FreeDeleter> foo_ptr(
-//     static_cast<int*>(malloc(sizeof(int))));
-struct FreeDeleter {
-  inline void operator()(void* ptr) const {
-    free(ptr);
-  }
-};
-
-}  // namespace base
-
-template <typename T, typename D = std::default_delete<T>>
-using scoped_ptr = std::unique_ptr<T, D>;
-
-// A function to convert T* into scoped_ptr<T>
-// Doing e.g. make_scoped_ptr(new FooBarBaz<type>(arg)) is a shorter notation
-// for scoped_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
-template <typename T>
-scoped_ptr<T> make_scoped_ptr(T* ptr) {
-  return scoped_ptr<T>(ptr);
-}
-
-#endif  // BASE_MEMORY_SCOPED_PTR_H_
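With base/memory/scoped_ptr.h gone, the migration for callers is mechanical; roughly (illustrative):

  #include <stdlib.h>

  #include <memory>

  #include "base/memory/free_deleter.h"  // base::FreeDeleter now lives here.
  #include "base/memory/ptr_util.h"      // base::WrapUnique / base::MakeUnique.

  struct Foo {};

  void Migrated() {
    // scoped_ptr<Foo> p(new Foo);           -> std::unique_ptr
    std::unique_ptr<Foo> p(new Foo);
    // make_scoped_ptr(new Foo)              -> base::WrapUnique(new Foo)
    std::unique_ptr<Foo> q = base::WrapUnique(new Foo);
    // scoped_ptr<int, base::FreeDeleter>    -> same deleter, new header
    std::unique_ptr<int, base::FreeDeleter> buf(
        static_cast<int*>(malloc(sizeof(int))));
  }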
diff --git a/base/memory/scoped_vector.h b/base/memory/scoped_vector.h
index 6730612..adbab8c 100644
--- a/base/memory/scoped_vector.h
+++ b/base/memory/scoped_vector.h
@@ -7,10 +7,10 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/move.h"
 #include "base/stl_util.h"
 
@@ -69,7 +69,7 @@
   reference back() { return v_.back(); }
 
   void push_back(T* elem) { v_.push_back(elem); }
-  void push_back(scoped_ptr<T> elem) { v_.push_back(elem.release()); }
+  void push_back(std::unique_ptr<T> elem) { v_.push_back(elem.release()); }
 
   void pop_back() {
     DCHECK(!empty());
@@ -110,7 +110,7 @@
     return v_.insert(position, x);
   }
 
-  iterator insert(iterator position, scoped_ptr<T> x) {
+  iterator insert(iterator position, std::unique_ptr<T> x) {
     return v_.insert(position, x.release());
   }
 
diff --git a/base/memory/scoped_vector_unittest.cc b/base/memory/scoped_vector_unittest.cc
index 8638ece..ea3dcdc 100644
--- a/base/memory/scoped_vector_unittest.cc
+++ b/base/memory/scoped_vector_unittest.cc
@@ -4,12 +4,12 @@
 
 #include "base/memory/scoped_vector.h"
 
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
 #include "base/callback.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace {
@@ -112,7 +112,7 @@
 
  private:
   LifeCycleState life_cycle_state_;
-  scoped_ptr<LifeCycleObject> constructed_life_cycle_object_;
+  std::unique_ptr<LifeCycleObject> constructed_life_cycle_object_;
 
   DISALLOW_COPY_AND_ASSIGN(LifeCycleWatcher);
 };
@@ -325,7 +325,7 @@
 // Assertions for push_back(scoped_ptr).
 TEST(ScopedVectorTest, PushBackScopedPtr) {
   int delete_counter = 0;
-  scoped_ptr<DeleteCounter> elem(new DeleteCounter(&delete_counter));
+  std::unique_ptr<DeleteCounter> elem(new DeleteCounter(&delete_counter));
   EXPECT_EQ(0, delete_counter);
   {
     ScopedVector<DeleteCounter> v;
diff --git a/base/memory/shared_memory.h b/base/memory/shared_memory.h
index 13238aa..257b9ae 100644
--- a/base/memory/shared_memory.h
+++ b/base/memory/shared_memory.h
@@ -32,10 +32,7 @@
 struct BASE_EXPORT SharedMemoryCreateOptions {
   SharedMemoryCreateOptions();
 
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The type of OS primitive that should back the SharedMemory object.
-  SharedMemoryHandle::Type type;
-#else
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
   // DEPRECATED (crbug.com/345734):
   // If NULL, the object is anonymous.  This pointer is owned by the caller
   // and must live through the call to Create().
@@ -47,7 +44,7 @@
   // shared memory must not exist.  This flag is meaningless unless
   // name_deprecated is non-NULL.
   bool open_existing_deprecated;
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
 
   // Size of the shared memory object to be created.
   // When opening an existing object, this has no effect.
@@ -58,6 +55,12 @@
 
   // If true, the file can be shared read-only to a process.
   bool share_read_only;
+
+#if defined(OS_WIN)
+  // If true, creates a file mapping without a name or proper ACLs. This is a
+  // stop-gap measure during investigation of https://crbug.com/585013.
+  bool create_without_name_or_permissions = false;
+#endif
 };
 
 // Platform abstraction for shared memory.  Provides a C++ wrapper
@@ -102,7 +105,7 @@
   // The caller is responsible for destroying the duplicated OS primitive.
   static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
 
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
   // This method requires that the SharedMemoryHandle is backed by a POSIX fd.
   static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
 #endif
@@ -123,16 +126,6 @@
   // Returns true on success and false on failure.
   bool CreateAndMapAnonymous(size_t size);
 
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // These two methods are analogs of CreateAndMapAnonymous and CreateAnonymous
-  // that force the underlying OS primitive to be a POSIX fd. Do not add new
-  // uses of these methods unless absolutely necessary, since constructing a
-  // fd-backed SharedMemory object frequently takes 100ms+.
-  // http://crbug.com/466437.
-  bool CreateAndMapAnonymousPosix(size_t size);
-  bool CreateAnonymousPosix(size_t size);
-#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
-
   // Creates an anonymous shared memory segment of size size.
   // Returns true on success and false on failure.
   bool CreateAnonymous(size_t size) {
@@ -257,12 +250,11 @@
   }
 
  private:
-#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID)
+#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
+    !(defined(OS_MACOSX) && !defined(OS_IOS))
   bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly);
-#if !(defined(OS_MACOSX) && !defined(OS_IOS))
   bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
 #endif
-#endif  // defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID)
   enum ShareMode {
     SHARE_READONLY,
     SHARE_CURRENT_MODE,
@@ -281,12 +273,6 @@
 #elif defined(OS_MACOSX) && !defined(OS_IOS)
   // The OS primitive that backs the shared memory region.
   SharedMemoryHandle shm_;
-
-  // The mechanism by which the memory is mapped. Only valid if |memory_| is not
-  // |nullptr|.
-  SharedMemoryHandle::Type mapped_memory_mechanism_;
-
-  int readonly_mapped_file_;
 #elif defined(OS_POSIX)
   int                mapped_file_;
   int                readonly_mapped_file_;
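
With SharedMemoryHandle::Type gone from SharedMemoryCreateOptions, callers no longer pick a backing primitive; on Mac the handle is always Mach-backed, and the POSIX path is unchanged elsewhere. A minimal usage sketch of the options struct as it now stands (CreateSmallRegion() is an illustrative helper and the 4096-byte size is arbitrary):

  #include "base/memory/shared_memory.h"

  bool CreateSmallRegion(base::SharedMemory* shm) {
    base::SharedMemoryCreateOptions options;
    options.size = 4096;             // Arbitrary size for illustration.
    options.share_read_only = true;  // Needed later for ShareReadOnlyToProcess().
    // No options.type any more: the platform chooses the backing primitive.
    return shm->Create(options) && shm->Map(options.size);
  }
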
diff --git a/base/memory/shared_memory_android.cc b/base/memory/shared_memory_android.cc
index dfc8e6f..5ac6776 100644
--- a/base/memory/shared_memory_android.cc
+++ b/base/memory/shared_memory_android.cc
@@ -57,13 +57,13 @@
   return true;
 }
 
-bool SharedMemory::Delete(const std::string& /* name */) {
+bool SharedMemory::Delete(const std::string&) {
   // Like on Windows, this is intentionally returning true as ashmem will
   // automatically release the resource when all FDs on it are closed.
   return true;
 }
 
-bool SharedMemory::Open(const std::string& /* name */, bool /* read_only */) {
+bool SharedMemory::Open(const std::string&, bool /*read_only*/) {
   // ashmem doesn't support name mapping
   NOTIMPLEMENTED();
   return false;
diff --git a/base/memory/shared_memory_handle.h b/base/memory/shared_memory_handle.h
index 5befcdd..8eff26b 100644
--- a/base/memory/shared_memory_handle.h
+++ b/base/memory/shared_memory_handle.h
@@ -14,9 +14,7 @@
 #include "base/process/process_handle.h"
 #elif defined(OS_MACOSX) && !defined(OS_IOS)
 #include <mach/mach.h>
-#include <sys/types.h>
 #include "base/base_export.h"
-#include "base/file_descriptor_posix.h"
 #include "base/macros.h"
 #include "base/process/process_handle.h"
 #elif defined(OS_POSIX)
@@ -87,32 +85,9 @@
 #else
 class BASE_EXPORT SharedMemoryHandle {
  public:
-  // The values of these enums must not change, as they are used by the
-  // histogram OSX.SharedMemory.Mechanism.
-  enum Type {
-    // The SharedMemoryHandle is backed by a POSIX fd.
-    POSIX,
-    // The SharedMemoryHandle is backed by the Mach primitive "memory object".
-    MACH,
-  };
-  static const int TypeMax = 2;
-
-  // The format that should be used to transmit |Type| over the wire.
-  typedef int TypeWireFormat;
-
   // The default constructor returns an invalid SharedMemoryHandle.
   SharedMemoryHandle();
 
-  // Constructs a SharedMemoryHandle backed by the components of a
-  // FileDescriptor. The newly created instance has the same ownership semantics
-  // as base::FileDescriptor. This typically means that the SharedMemoryHandle
-  // takes ownership of the |fd| if |auto_close| is true. Unfortunately, it's
-  // common for existing code to make shallow copies of SharedMemoryHandle, and
-  // the one that is finally passed into a base::SharedMemory is the one that
-  // "consumes" the fd.
-  explicit SharedMemoryHandle(const base::FileDescriptor& file_descriptor);
-  SharedMemoryHandle(int fd, bool auto_close);
-
   // Makes a Mach-based SharedMemoryHandle of the given size. On error,
   // subsequent calls to IsValid() return false.
   explicit SharedMemoryHandle(mach_vm_size_t size);
@@ -138,28 +113,16 @@
   bool operator==(const SharedMemoryHandle& handle) const;
   bool operator!=(const SharedMemoryHandle& handle) const;
 
-  // Returns the type.
-  Type GetType() const;
-
   // Whether the underlying OS primitive is valid. Once the SharedMemoryHandle
   // is backed by a valid OS primitive, it becomes immutable.
   bool IsValid() const;
 
-  // Sets the POSIX fd backing the SharedMemoryHandle. Requires that the
-  // SharedMemoryHandle be backed by a POSIX fd.
-  void SetFileHandle(int fd, bool auto_close);
-
-  // This method assumes that the SharedMemoryHandle is backed by a POSIX fd.
-  // This is eventually no longer going to be true, so please avoid adding new
-  // uses of this method.
-  const FileDescriptor GetFileDescriptor() const;
-
   // Exposed so that the SharedMemoryHandle can be transported between
   // processes.
   mach_port_t GetMemoryObject() const;
 
   // Returns false on a failure to determine the size. On success, populates the
-  // output variable |size|.
+  // output variable |size|. Sets |size| to 0 if the handle is invalid.
   bool GetSize(size_t* size) const;
 
   // The SharedMemoryHandle must be valid.
@@ -178,31 +141,21 @@
   // Shared code between copy constructor and operator=.
   void CopyRelevantData(const SharedMemoryHandle& handle);
 
-  Type type_;
+  mach_port_t memory_object_ = MACH_PORT_NULL;
 
-  // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
-  // mach port. |type_| determines the backing member.
-  union {
-    FileDescriptor file_descriptor_;
+  // The size of the shared memory region. Only relevant if |memory_object_|
+  // is not |MACH_PORT_NULL|.
+  mach_vm_size_t size_ = 0;
 
-    struct {
-      mach_port_t memory_object_;
+  // The pid of the process in which |memory_object_| is usable. Only
+  // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+  base::ProcessId pid_ = 0;
 
-      // The size of the shared memory region when |type_| is MACH. Only
-      // relevant if |memory_object_| is not |MACH_PORT_NULL|.
-      mach_vm_size_t size_;
-
-      // The pid of the process in which |memory_object_| is usable. Only
-      // relevant if |memory_object_| is not |MACH_PORT_NULL|.
-      base::ProcessId pid_;
-
-      // Whether passing this object as a parameter to an IPC message passes
-      // ownership of |memory_object_| to the IPC stack. This is meant to mimic
-      // the behavior of the |auto_close| parameter of FileDescriptor.
-      // Defaults to |false|.
-      bool ownership_passes_to_ipc_;
-    };
-  };
+  // Whether passing this object as a parameter to an IPC message passes
+  // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+  // the behavior of the |auto_close| parameter of FileDescriptor.
+  // Defaults to |false|.
+  bool ownership_passes_to_ipc_ = false;
 };
 #endif
 
diff --git a/base/memory/shared_memory_handle_mac.cc b/base/memory/shared_memory_handle_mac.cc
index 600d2bb..ad470be 100644
--- a/base/memory/shared_memory_handle_mac.cc
+++ b/base/memory/shared_memory_handle_mac.cc
@@ -14,22 +14,9 @@
 
 namespace base {
 
-static_assert(sizeof(SharedMemoryHandle::Type) <=
-                  sizeof(SharedMemoryHandle::TypeWireFormat),
-              "Size of enum SharedMemoryHandle::Type exceeds size of type "
-              "transmitted over wire.");
-
-SharedMemoryHandle::SharedMemoryHandle() : type_(POSIX), file_descriptor_() {}
-
-SharedMemoryHandle::SharedMemoryHandle(
-    const base::FileDescriptor& file_descriptor)
-    : type_(POSIX), file_descriptor_(file_descriptor) {}
-
-SharedMemoryHandle::SharedMemoryHandle(int fd, bool auto_close)
-    : type_(POSIX), file_descriptor_(fd, auto_close) {}
+SharedMemoryHandle::SharedMemoryHandle() {}
 
 SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
-  type_ = MACH;
   mach_port_t named_right;
   kern_return_t kr = mach_make_memory_entry_64(
       mach_task_self(),
@@ -52,14 +39,12 @@
 SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
                                        mach_vm_size_t size,
                                        base::ProcessId pid)
-    : type_(MACH),
-      memory_object_(memory_object),
+    : memory_object_(memory_object),
       size_(size),
       pid_(pid),
       ownership_passes_to_ipc_(false) {}
 
-SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle)
-    : type_(handle.type_) {
+SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle) {
   CopyRelevantData(handle);
 }
 
@@ -68,104 +53,51 @@
   if (this == &handle)
     return *this;
 
-  type_ = handle.type_;
   CopyRelevantData(handle);
   return *this;
 }
 
 SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
-  switch (type_) {
-    case POSIX: {
-      if (!IsValid())
-        return SharedMemoryHandle();
+  if (!IsValid())
+    return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
 
-      int duped_fd = HANDLE_EINTR(dup(file_descriptor_.fd));
-      if (duped_fd < 0)
-        return SharedMemoryHandle();
-      return SharedMemoryHandle(duped_fd, true);
-    }
-    case MACH: {
-      if (!IsValid())
-        return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
-
-      // Increment the ref count.
-      kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
-                                            MACH_PORT_RIGHT_SEND, 1);
-      DCHECK_EQ(kr, KERN_SUCCESS);
-      SharedMemoryHandle handle(*this);
-      handle.SetOwnershipPassesToIPC(true);
-      return handle;
-    }
-  }
+  // Increment the ref count.
+  kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+                                        MACH_PORT_RIGHT_SEND, 1);
+  DCHECK_EQ(kr, KERN_SUCCESS);
+  SharedMemoryHandle handle(*this);
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
 }
 
 bool SharedMemoryHandle::operator==(const SharedMemoryHandle& handle) const {
   if (!IsValid() && !handle.IsValid())
     return true;
 
-  if (type_ != handle.type_)
-    return false;
-
-  switch (type_) {
-    case POSIX:
-      return file_descriptor_ == handle.file_descriptor_;
-    case MACH:
-      return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
-             pid_ == handle.pid_;
-  }
+  return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
+         pid_ == handle.pid_;
 }
 
 bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
   return !(*this == handle);
 }
 
-SharedMemoryHandle::Type SharedMemoryHandle::GetType() const {
-  return type_;
-}
-
 bool SharedMemoryHandle::IsValid() const {
-  switch (type_) {
-    case POSIX:
-      return file_descriptor_.fd >= 0;
-    case MACH:
-      return memory_object_ != MACH_PORT_NULL;
-  }
-}
-
-void SharedMemoryHandle::SetFileHandle(int fd, bool auto_close) {
-  DCHECK(!IsValid());
-  file_descriptor_.fd = fd;
-  file_descriptor_.auto_close = auto_close;
-  type_ = POSIX;
-}
-
-const FileDescriptor SharedMemoryHandle::GetFileDescriptor() const {
-  DCHECK_EQ(type_, POSIX);
-  return file_descriptor_;
+  return memory_object_ != MACH_PORT_NULL;
 }
 
 mach_port_t SharedMemoryHandle::GetMemoryObject() const {
-  DCHECK_EQ(type_, MACH);
   return memory_object_;
 }
 
 bool SharedMemoryHandle::GetSize(size_t* size) const {
-  if (!IsValid())
-    return false;
-
-  switch (type_) {
-    case SharedMemoryHandle::POSIX:
-      struct stat st;
-      if (fstat(file_descriptor_.fd, &st) != 0)
-        return false;
-      if (st.st_size < 0)
-        return false;
-      *size = st.st_size;
-      return true;
-    case SharedMemoryHandle::MACH:
-      *size = size_;
-      return true;
+  if (!IsValid()) {
+    *size = 0;
+    return true;
   }
+
+  *size = size_;
+  return true;
 }
 
 bool SharedMemoryHandle::MapAt(off_t offset,
@@ -173,72 +105,42 @@
                                void** memory,
                                bool read_only) {
   DCHECK(IsValid());
-  switch (type_) {
-    case SharedMemoryHandle::POSIX:
-      *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
-                     MAP_SHARED, file_descriptor_.fd, offset);
-
-      return *memory && *memory != reinterpret_cast<void*>(-1);
-    case SharedMemoryHandle::MACH:
-      // The flag VM_PROT_IS_MASK is only supported on OSX 10.7+.
-      DCHECK(mac::IsOSLionOrLater());
-
-      DCHECK_EQ(pid_, GetCurrentProcId());
-      kern_return_t kr = mach_vm_map(
-          mach_task_self(),
-          reinterpret_cast<mach_vm_address_t*>(memory),    // Output parameter
-          bytes,
-          0,                                               // Alignment mask
-          VM_FLAGS_ANYWHERE,
-          memory_object_,
-          offset,
-          FALSE,                                           // Copy
-          VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE),  // Current protection
-          VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK,  // Maximum protection
-          VM_INHERIT_NONE);
-      return kr == KERN_SUCCESS;
-  }
+  DCHECK_EQ(pid_, GetCurrentProcId());
+  kern_return_t kr = mach_vm_map(
+      mach_task_self(),
+      reinterpret_cast<mach_vm_address_t*>(memory),  // Output parameter
+      bytes,
+      0,  // Alignment mask
+      VM_FLAGS_ANYWHERE, memory_object_, offset,
+      FALSE,                                           // Copy
+      VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE),  // Current protection
+      VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK,  // Maximum protection
+      VM_INHERIT_NONE);
+  return kr == KERN_SUCCESS;
 }
 
 void SharedMemoryHandle::Close() const {
   if (!IsValid())
     return;
 
-  switch (type_) {
-    case POSIX:
-      if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
-        DPLOG(ERROR) << "Error closing fd.";
-      break;
-    case MACH:
-      kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
-      if (kr != KERN_SUCCESS)
-        DPLOG(ERROR) << "Error deallocating mach port: " << kr;
-      break;
-  }
+  kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+  if (kr != KERN_SUCCESS)
+    DPLOG(ERROR) << "Error deallocating mach port: " << kr;
 }
 
 void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
-  DCHECK_EQ(type_, MACH);
   ownership_passes_to_ipc_ = ownership_passes;
 }
 
 bool SharedMemoryHandle::OwnershipPassesToIPC() const {
-  DCHECK_EQ(type_, MACH);
   return ownership_passes_to_ipc_;
 }
 
 void SharedMemoryHandle::CopyRelevantData(const SharedMemoryHandle& handle) {
-  switch (type_) {
-    case POSIX:
-      file_descriptor_ = handle.file_descriptor_;
-      break;
-    case MACH:
-      memory_object_ = handle.memory_object_;
-      size_ = handle.size_;
-      pid_ = handle.pid_;
-      ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
-      break;
-  }
+  memory_object_ = handle.memory_object_;
+  size_ = handle.size_;
+  pid_ = handle.pid_;
+  ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
 }
 
 }  // namespace base
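
With the POSIX arm removed, Duplicate() and Close() reduce to Mach send-right reference counting. A sketch of the expected lifetime on a Mac build, mirroring the MachDuplicateAndClose unit test (DuplicateAndRelease() is illustrative and the size is arbitrary):

  #include "base/memory/shared_memory_handle.h"

  void DuplicateAndRelease() {
    base::SharedMemoryHandle handle(4096);  // mach_make_memory_entry_64 under the hood.
    base::SharedMemoryHandle dup = handle.Duplicate();  // Adds a send right, marked for IPC ownership.
    dup.Close();     // mach_port_deallocate on the duplicated right.
    handle.Close();  // Releases the original right; the region can then be freed.
  }
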
diff --git a/base/memory/shared_memory_mac.cc b/base/memory/shared_memory_mac.cc
index acb43ea..d15c632 100644
--- a/base/memory/shared_memory_mac.cc
+++ b/base/memory/shared_memory_mac.cc
@@ -4,69 +4,26 @@
 
 #include "base/memory/shared_memory.h"
 
-#include <errno.h>
-#include <fcntl.h>
 #include <mach/mach_vm.h>
-#include <stddef.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
 
 #include "base/files/file_util.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
+#include "base/mac/foundation_util.h"
 #include "base/mac/mac_util.h"
 #include "base/mac/scoped_mach_vm.h"
 #include "base/metrics/field_trial.h"
 #include "base/metrics/histogram_macros.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/posix/safe_strerror.h"
 #include "base/process/process_metrics.h"
 #include "base/profiler/scoped_tracker.h"
 #include "base/scoped_generic.h"
 #include "base/strings/utf_string_conversions.h"
 #include "build/build_config.h"
 
-#if defined(OS_MACOSX)
-#include "base/mac/foundation_util.h"
-#endif  // OS_MACOSX
-
 namespace base {
 
 namespace {
 
-const char kTrialName[] = "MacMemoryMechanism";
-const char kTrialMach[] = "Mach";
-const char kTrialPosix[] = "Posix";
-
-SharedMemoryHandle::Type GetABTestMechanism() {
-  static bool found_group = false;
-  static SharedMemoryHandle::Type group = SharedMemoryHandle::MACH;
-
-  if (found_group)
-    return group;
-
-  const std::string group_name =
-      base::FieldTrialList::FindFullName(kTrialName);
-  if (group_name == kTrialMach) {
-    group = SharedMemoryHandle::MACH;
-    found_group = true;
-  } else if (group_name == kTrialPosix) {
-    group = SharedMemoryHandle::POSIX;
-    found_group = true;
-  } else {
-    group = SharedMemoryHandle::MACH;
-  }
-
-  return group;
-}
-
-// Emits a histogram entry indicating which type of SharedMemory was created.
-void EmitMechanism(SharedMemoryHandle::Type type) {
-  UMA_HISTOGRAM_ENUMERATION("OSX.SharedMemory.Mechanism", type,
-                            SharedMemoryHandle::TypeMax);
-}
-
 // Returns whether the operation succeeded.
 // |new_handle| is an output variable, populated on success. The caller takes
 // ownership of the underlying memory object.
@@ -110,100 +67,18 @@
   return true;
 }
 
-struct ScopedPathUnlinkerTraits {
-  static FilePath* InvalidValue() { return nullptr; }
-
-  static void Free(FilePath* path) {
-    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
-    // is fixed.
-    tracked_objects::ScopedTracker tracking_profile(
-        FROM_HERE_WITH_EXPLICIT_FUNCTION(
-            "466437 SharedMemory::Create::Unlink"));
-    if (unlink(path->value().c_str()))
-      PLOG(WARNING) << "unlink";
-  }
-};
-
-// Unlinks the FilePath when the object is destroyed.
-typedef ScopedGeneric<FilePath*, ScopedPathUnlinkerTraits> ScopedPathUnlinker;
-
-// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
-// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
-// options.share_read_only is true. |path| is populated with the location of
-// the file before it was unlinked.
-// Returns false if there's an unhandled failure.
-bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
-                                 ScopedFILE* fp,
-                                 ScopedFD* readonly_fd,
-                                 FilePath* path) {
-  // Q: Why not use the shm_open() etc. APIs?
-  // A: Because they're limited to 4mb on OS X.  FFFFFFFUUUUUUUUUUU
-  FilePath directory;
-  ScopedPathUnlinker path_unlinker;
-  if (GetShmemTempDir(options.executable, &directory)) {
-    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
-    // is fixed.
-    tracked_objects::ScopedTracker tracking_profile(
-        FROM_HERE_WITH_EXPLICIT_FUNCTION(
-            "466437 SharedMemory::Create::OpenTemporaryFile"));
-    fp->reset(CreateAndOpenTemporaryFileInDir(directory, path));
-
-    // Deleting the file prevents anyone else from mapping it in (making it
-    // private), and prevents the need for cleanup (once the last fd is
-    // closed, it is truly freed).
-    if (*fp)
-      path_unlinker.reset(path);
-  }
-
-  if (*fp) {
-    if (options.share_read_only) {
-      // TODO(erikchen): Remove ScopedTracker below once
-      // http://crbug.com/466437 is fixed.
-      tracked_objects::ScopedTracker tracking_profile(
-          FROM_HERE_WITH_EXPLICIT_FUNCTION(
-              "466437 SharedMemory::Create::OpenReadonly"));
-      // Also open as readonly so that we can ShareReadOnlyToProcess.
-      readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
-      if (!readonly_fd->is_valid()) {
-        DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
-        fp->reset();
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
 }  // namespace
 
 SharedMemoryCreateOptions::SharedMemoryCreateOptions()
-    : type(SharedMemoryHandle::MACH),
-      size(0),
+    : size(0),
       executable(false),
-      share_read_only(false) {
-  if (mac::IsOSLionOrLater()) {
-    // A/B test the mechanism. Once the experiment is over, this will always be
-    // set to SharedMemoryHandle::MACH.
-    // http://crbug.com/547261
-    type = GetABTestMechanism();
-  } else {
-    // Mach shared memory isn't supported on OSX 10.6 or older.
-    type = SharedMemoryHandle::POSIX;
-  }
-}
+      share_read_only(false) {}
 
 SharedMemory::SharedMemory()
-    : mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
-      readonly_mapped_file_(-1),
-      mapped_size_(0),
-      memory_(NULL),
-      read_only_(false),
-      requested_size_(0) {}
+    : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
 
 SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
     : shm_(handle),
-      mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
-      readonly_mapped_file_(-1),
       mapped_size_(0),
       memory_(NULL),
       read_only_(read_only),
@@ -231,7 +106,8 @@
 
 // static
 size_t SharedMemory::GetHandleLimit() {
-  return GetMaxFds();
+  // This should be effectively unlimited on OS X.
+  return 10000;
 }
 
 // static
@@ -240,27 +116,10 @@
   return handle.Duplicate();
 }
 
-// static
-int SharedMemory::GetFdFromSharedMemoryHandle(
-    const SharedMemoryHandle& handle) {
-  return handle.GetFileDescriptor().fd;
-}
-
 bool SharedMemory::CreateAndMapAnonymous(size_t size) {
   return CreateAnonymous(size) && Map(size);
 }
 
-bool SharedMemory::CreateAndMapAnonymousPosix(size_t size) {
-  return CreateAnonymousPosix(size) && Map(size);
-}
-
-bool SharedMemory::CreateAnonymousPosix(size_t size) {
-  SharedMemoryCreateOptions options;
-  options.type = SharedMemoryHandle::POSIX;
-  options.size = size;
-  return Create(options);
-}
-
 // static
 bool SharedMemory::GetSizeFromSharedMemoryHandle(
     const SharedMemoryHandle& handle,
@@ -282,44 +141,9 @@
   if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
     return false;
 
-  EmitMechanism(options.type);
-
-  if (options.type == SharedMemoryHandle::MACH) {
-    shm_ = SharedMemoryHandle(options.size);
-    requested_size_ = options.size;
-    return shm_.IsValid();
-  }
-
-  // This function theoretically can block on the disk. Both profiling of real
-  // users and local instrumentation shows that this is a real problem.
-  // https://code.google.com/p/chromium/issues/detail?id=466437
-  base::ThreadRestrictions::ScopedAllowIO allow_io;
-
-  ScopedFILE fp;
-  ScopedFD readonly_fd;
-
-  FilePath path;
-  bool result = CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
-  if (!result)
-    return false;
-
-  if (!fp) {
-    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
-    return false;
-  }
-
-  // Get current size.
-  struct stat stat;
-  if (fstat(fileno(fp.get()), &stat) != 0)
-    return false;
-  const size_t current_size = stat.st_size;
-  if (current_size != options.size) {
-    if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
-      return false;
-  }
+  shm_ = SharedMemoryHandle(options.size);
   requested_size_ = options.size;
-
-  return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+  return shm_.IsValid();
 }
 
 bool SharedMemory::MapAt(off_t offset, size_t bytes) {
@@ -335,7 +159,6 @@
     mapped_size_ = bytes;
     DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
                       (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
-    mapped_memory_mechanism_ = shm_.GetType();
   } else {
     memory_ = NULL;
   }
@@ -347,142 +170,49 @@
   if (memory_ == NULL)
     return false;
 
-  switch (mapped_memory_mechanism_) {
-    case SharedMemoryHandle::POSIX:
-      munmap(memory_, mapped_size_);
-      break;
-    case SharedMemoryHandle::MACH:
-      mach_vm_deallocate(mach_task_self(),
-                         reinterpret_cast<mach_vm_address_t>(memory_),
-                         mapped_size_);
-      break;
-  }
-
+  mach_vm_deallocate(mach_task_self(),
+                     reinterpret_cast<mach_vm_address_t>(memory_),
+                     mapped_size_);
   memory_ = NULL;
   mapped_size_ = 0;
   return true;
 }
 
 SharedMemoryHandle SharedMemory::handle() const {
-  switch (shm_.GetType()) {
-    case SharedMemoryHandle::POSIX:
-      return SharedMemoryHandle(shm_.GetFileDescriptor().fd, false);
-    case SharedMemoryHandle::MACH:
-      return shm_;
-  }
+  return shm_;
 }
 
 void SharedMemory::Close() {
   shm_.Close();
   shm_ = SharedMemoryHandle();
-  if (shm_.GetType() == SharedMemoryHandle::POSIX) {
-    if (readonly_mapped_file_ > 0) {
-      if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
-        PLOG(ERROR) << "close";
-      readonly_mapped_file_ = -1;
-    }
-  }
 }
 
-bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
-  DCHECK(!shm_.IsValid());
-  DCHECK_EQ(-1, readonly_mapped_file_);
-  if (fp == NULL)
-    return false;
-
-  // This function theoretically can block on the disk, but realistically
-  // the temporary files we create will just go into the buffer cache
-  // and be deleted before they ever make it out to disk.
-  base::ThreadRestrictions::ScopedAllowIO allow_io;
-
-  struct stat st = {};
-  if (fstat(fileno(fp.get()), &st))
-    NOTREACHED();
-  if (readonly_fd.is_valid()) {
-    struct stat readonly_st = {};
-    if (fstat(readonly_fd.get(), &readonly_st))
-      NOTREACHED();
-    if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
-      LOG(ERROR) << "writable and read-only inodes don't match; bailing";
-      return false;
-    }
-  }
-
-  int mapped_file = HANDLE_EINTR(dup(fileno(fp.get())));
-  if (mapped_file == -1) {
-    if (errno == EMFILE) {
-      LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
-      return false;
-    } else {
-      NOTREACHED() << "Call to dup failed, errno=" << errno;
-    }
-  }
-  shm_ = SharedMemoryHandle(mapped_file, false);
-  readonly_mapped_file_ = readonly_fd.release();
-
-  return true;
-}
-
-bool SharedMemory::ShareToProcessCommon(ProcessHandle /* process */,
+bool SharedMemory::ShareToProcessCommon(ProcessHandle /*process*/,
                                         SharedMemoryHandle* new_handle,
                                         bool close_self,
                                         ShareMode share_mode) {
-  if (shm_.GetType() == SharedMemoryHandle::MACH) {
-    DCHECK(shm_.IsValid());
+  DCHECK(shm_.IsValid());
 
-    bool success = false;
-    switch (share_mode) {
-      case SHARE_CURRENT_MODE:
-        *new_handle = shm_.Duplicate();
-        success = true;
-        break;
-      case SHARE_READONLY:
-        success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
-        break;
-    }
-
-    if (success)
-      new_handle->SetOwnershipPassesToIPC(true);
-
-    if (close_self) {
-      Unmap();
-      Close();
-    }
-
-    return success;
-  }
-
-  int handle_to_dup = -1;
+  bool success = false;
   switch (share_mode) {
     case SHARE_CURRENT_MODE:
-      handle_to_dup = shm_.GetFileDescriptor().fd;
+      *new_handle = shm_.Duplicate();
+      success = true;
       break;
     case SHARE_READONLY:
-      // We could imagine re-opening the file from /dev/fd, but that can't make
-      // it readonly on Mac: https://codereview.chromium.org/27265002/#msg10
-      CHECK_GE(readonly_mapped_file_, 0);
-      handle_to_dup = readonly_mapped_file_;
+      success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
       break;
   }
 
-  const int new_fd = HANDLE_EINTR(dup(handle_to_dup));
-  if (new_fd < 0) {
-    if (close_self) {
-      Unmap();
-      Close();
-    }
-    DPLOG(ERROR) << "dup() failed.";
-    return false;
-  }
-
-  new_handle->SetFileHandle(new_fd, true);
+  if (success)
+    new_handle->SetOwnershipPassesToIPC(true);
 
   if (close_self) {
     Unmap();
     Close();
   }
 
-  return true;
+  return success;
 }
 
 }  // namespace base
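
ShareToProcessCommon() is now Mach-only: SHARE_CURRENT_MODE duplicates the handle and SHARE_READONLY goes through MakeMachSharedMemoryHandleReadOnly(). Callers are unaffected; a sketch of read-only sharing following the pattern used by the Mac unit tests (the ShareReadOnly() wrapper is illustrative):

  #include "base/memory/shared_memory.h"
  #include "base/process/process_handle.h"

  bool ShareReadOnly(base::SharedMemory* shm, base::SharedMemoryHandle* out) {
    // |out| receives a memory object limited to VM_PROT_READ; ownership of the
    // send right passes to the IPC layer.
    return shm->ShareReadOnlyToProcess(base::GetCurrentProcId(), out);
  }
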
diff --git a/base/memory/shared_memory_mac_unittest.cc b/base/memory/shared_memory_mac_unittest.cc
index bcb1f2b..c7d20ec 100644
--- a/base/memory/shared_memory_mac_unittest.cc
+++ b/base/memory/shared_memory_mac_unittest.cc
@@ -54,13 +54,13 @@
 }
 
 // Creates a new SharedMemory with the given |size|, filled with 'a'.
-scoped_ptr<SharedMemory> CreateSharedMemory(int size) {
+std::unique_ptr<SharedMemory> CreateSharedMemory(int size) {
   SharedMemoryHandle shm(size);
   if (!shm.IsValid()) {
     LOG(ERROR) << "Failed to make SharedMemoryHandle";
     return nullptr;
   }
-  scoped_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
   shared_memory->Map(size);
   memset(shared_memory->memory(), 'a', size);
   return shared_memory;
@@ -228,13 +228,10 @@
 // Tests that content written to shared memory in the server process can be read
 // by the child process.
 TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
   SetUpChild("MachBasedSharedMemoryClient");
 
-  scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
 
   // Send the underlying memory object to the client process.
   SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
@@ -263,10 +260,6 @@
 
 // Tests that mapping shared memory with an offset works correctly.
 TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
   SetUpChild("MachBasedSharedMemoryWithOffsetClient");
 
   SharedMemoryHandle shm(s_memory_size);
@@ -312,10 +305,6 @@
 // Tests that duplication and closing has the right effect on Mach reference
 // counts.
 TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicateAndClose) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
   mach_msg_type_number_t active_name_count = GetActiveNameCount();
 
   // Making a new SharedMemoryHandle increments the name count.
@@ -341,13 +330,10 @@
 
 // Tests that Mach shared memory can be mapped and unmapped.
 TEST_F(SharedMemoryMacMultiProcessTest, MachUnmapMap) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
   mach_msg_type_number_t active_name_count = GetActiveNameCount();
 
-  scoped_ptr<SharedMemory> shared_memory = CreateSharedMemory(s_memory_size);
+  std::unique_ptr<SharedMemory> shared_memory =
+      CreateSharedMemory(s_memory_size);
   ASSERT_TRUE(shared_memory->Unmap());
   ASSERT_TRUE(shared_memory->Map(s_memory_size));
   shared_memory.reset();
@@ -358,10 +344,6 @@
 // ownership, and that destroying the SharedMemory closes the SharedMemoryHandle
 // as well.
 TEST_F(SharedMemoryMacMultiProcessTest, MachSharedMemoryTakesOwnership) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
   mach_msg_type_number_t active_name_count = GetActiveNameCount();
 
   // Making a new SharedMemoryHandle increments the name count.
@@ -370,7 +352,7 @@
   EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
 
   // Name count doesn't change when mapping the memory.
-  scoped_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
   shared_memory->Map(s_memory_size);
   EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
 
@@ -381,11 +363,8 @@
 
 // Tests that the read-only flag works.
 TEST_F(SharedMemoryMacMultiProcessTest, MachReadOnly) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
-  scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
 
   SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
   ASSERT_TRUE(shm2.IsValid());
@@ -396,14 +375,11 @@
 
 // Tests that the method ShareToProcess() works.
 TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcess) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
   mach_msg_type_number_t active_name_count = GetActiveNameCount();
 
   {
-    scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+    std::unique_ptr<SharedMemory> shared_memory(
+        CreateSharedMemory(s_memory_size));
 
     SharedMemoryHandle shm2;
     ASSERT_TRUE(shared_memory->ShareToProcess(GetCurrentProcId(), &shm2));
@@ -421,11 +397,8 @@
 // Tests that the method ShareReadOnlyToProcess() creates a memory object that
 // is read only.
 TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcessReadonly) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
-  scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
 
   // Check the protection levels.
   int current_prot, max_prot;
@@ -464,14 +437,11 @@
 
 // Tests that the method ShareReadOnlyToProcess() doesn't leak.
 TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcessReadonlyLeak) {
-  // Mach-based SharedMemory isn't support on OSX 10.6.
-  if (mac::IsOSSnowLeopard())
-    return;
-
   mach_msg_type_number_t active_name_count = GetActiveNameCount();
 
   {
-    scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+    std::unique_ptr<SharedMemory> shared_memory(
+        CreateSharedMemory(s_memory_size));
 
     SharedMemoryHandle shm2;
     ASSERT_TRUE(
diff --git a/base/memory/shared_memory_posix.cc b/base/memory/shared_memory_posix.cc
index a05e866..7e94223 100644
--- a/base/memory/shared_memory_posix.cc
+++ b/base/memory/shared_memory_posix.cc
@@ -288,7 +288,7 @@
     }
     requested_size_ = options.size;
   }
-  if (fp == NULL) {
+  if (fp == nullptr) {
     PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
     FilePath dir = path.DirName();
     if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
@@ -405,7 +405,7 @@
 bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
   DCHECK_EQ(-1, mapped_file_);
   DCHECK_EQ(-1, readonly_mapped_file_);
-  if (fp == NULL)
+  if (fp == nullptr)
     return false;
 
   // This function theoretically can block on the disk, but realistically
@@ -464,7 +464,7 @@
 }
 #endif  // !defined(OS_ANDROID) && !defined(__ANDROID__)
 
-bool SharedMemory::ShareToProcessCommon(ProcessHandle /* process */,
+bool SharedMemory::ShareToProcessCommon(ProcessHandle,
                                         SharedMemoryHandle* new_handle,
                                         bool close_self,
                                         ShareMode share_mode) {
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
index cfb0b32..8251f60 100644
--- a/base/memory/shared_memory_unittest.cc
+++ b/base/memory/shared_memory_unittest.cc
@@ -2,13 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/memory/shared_memory.h"
+
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
+
 #include "base/atomicops.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/shared_memory.h"
 #include "base/memory/shared_memory_handle.h"
 #include "base/process/kill.h"
 #include "base/rand_util.h"
@@ -246,8 +248,8 @@
   int threadcounts[] = { 1, kNumThreads };
   for (size_t i = 0; i < arraysize(threadcounts); i++) {
     int numthreads = threadcounts[i];
-    scoped_ptr<PlatformThreadHandle[]> thread_handles;
-    scoped_ptr<MultipleThreadMain*[]> thread_delegates;
+    std::unique_ptr<PlatformThreadHandle[]> thread_handles;
+    std::unique_ptr<MultipleThreadMain* []> thread_delegates;
 
     thread_handles.reset(new PlatformThreadHandle[numthreads]);
     thread_delegates.reset(new MultipleThreadMain*[numthreads]);
@@ -279,8 +281,8 @@
   bool rv;
   const uint32_t kDataSize = 8192;
 
-  scoped_ptr<SharedMemory[]> memories(new SharedMemory[count]);
-  scoped_ptr<int*[]> pointers(new int*[count]);
+  std::unique_ptr<SharedMemory[]> memories(new SharedMemory[count]);
+  std::unique_ptr<int* []> pointers(new int*[count]);
   ASSERT_TRUE(memories.get());
   ASSERT_TRUE(pointers.get());
 
@@ -314,6 +316,8 @@
   }
 }
 
+// The Mach functionality is tested in shared_memory_mac_unittest.cc.
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
 TEST(SharedMemoryTest, ShareReadOnly) {
   StringPiece contents = "Hello World";
 
@@ -321,10 +325,6 @@
   SharedMemoryCreateOptions options;
   options.size = contents.size();
   options.share_read_only = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
-  options.type = SharedMemoryHandle::POSIX;
-#endif
   ASSERT_TRUE(writable_shmem.Create(options));
   ASSERT_TRUE(writable_shmem.Map(options.size));
   memcpy(writable_shmem.memory(), contents.data(), contents.size());
@@ -400,6 +400,7 @@
 #error Unexpected platform; write a test that tries to make 'handle' writable.
 #endif  // defined(OS_POSIX) || defined(OS_WIN)
 }
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
 
 TEST(SharedMemoryTest, ShareToSelf) {
   StringPiece contents = "Hello World";
@@ -473,7 +474,7 @@
   EXPECT_EQ(old_address, memory.memory());
 }
 
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
 // This test is not applicable for iOS (crbug.com/399384).
 #if !defined(OS_IOS)
 // Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
@@ -484,10 +485,6 @@
   SharedMemoryCreateOptions options;
   options.size = kTestSize;
   options.executable = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
-  options.type = SharedMemoryHandle::POSIX;
-#endif
 
   EXPECT_TRUE(shared_memory.Create(options));
   EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
@@ -521,10 +518,6 @@
   SharedMemory shared_memory;
   SharedMemoryCreateOptions options;
   options.size = kTestSize;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
-  options.type = SharedMemoryHandle::POSIX;
-#endif
   // Set a file mode creation mask that gives all permissions.
   ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
 
@@ -547,10 +540,6 @@
   SharedMemory shared_memory;
   SharedMemoryCreateOptions options;
   options.size = kTestSize;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
-  options.type = SharedMemoryHandle::POSIX;
-#endif
 
   // Set a file mode creation mask that gives all permissions.
   ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
@@ -567,7 +556,7 @@
 }
 #endif  // !defined(OS_ANDROID)
 
-#endif  // defined(OS_POSIX)
+#endif  // defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
 
 // Map() will return addresses which are aligned to the platform page size, this
 // varies from platform to platform though.  Since we'd like to advertise a
@@ -589,8 +578,8 @@
   EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);
 
   // Map the current executable image to save us creating a new PE file on disk.
-  base::win::ScopedHandle file_handle(
-      ::CreateFile(path, GENERIC_READ, 0, nullptr, OPEN_EXISTING, 0, nullptr));
+  base::win::ScopedHandle file_handle(::CreateFile(
+      path, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0, nullptr));
   EXPECT_TRUE(file_handle.IsValid());
   base::win::ScopedHandle section_handle(
       ::CreateFileMappingA(file_handle.Get(), nullptr,
diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h
index 006e1fd..3b8bcb1 100644
--- a/base/memory/weak_ptr.h
+++ b/base/memory/weak_ptr.h
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 // Weak pointers are pointers to an object that do not affect its lifetime,
-// and which may be invalidated (i.e. reset to NULL) by the object, or its
+// and which may be invalidated (i.e. reset to nullptr) by the object, or its
 // owner, at any time, most commonly when the object is about to be deleted.
 
 // Weak pointers are useful when an object needs to be accessed safely by one
@@ -70,6 +70,7 @@
 #ifndef BASE_MEMORY_WEAK_PTR_H_
 #define BASE_MEMORY_WEAK_PTR_H_
 
+#include <cstddef>
 #include <type_traits>
 
 #include "base/base_export.h"
@@ -199,8 +200,9 @@
 template <typename T>
 class WeakPtr : public internal::WeakPtrBase {
  public:
-  WeakPtr() : ptr_(NULL) {
-  }
+  WeakPtr() : ptr_(nullptr) {}
+
+  WeakPtr(std::nullptr_t) : ptr_(nullptr) {}
 
   // Allow conversion from U to T provided U "is a" T. Note that this
   // is separate from the (implicit) copy constructor.
@@ -208,20 +210,20 @@
   WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other), ptr_(other.ptr_) {
   }
 
-  T* get() const { return ref_.is_valid() ? ptr_ : NULL; }
+  T* get() const { return ref_.is_valid() ? ptr_ : nullptr; }
 
   T& operator*() const {
-    DCHECK(get() != NULL);
+    DCHECK(get() != nullptr);
     return *get();
   }
   T* operator->() const {
-    DCHECK(get() != NULL);
+    DCHECK(get() != nullptr);
     return get();
   }
 
   void reset() {
     ref_ = internal::WeakReference();
-    ptr_ = NULL;
+    ptr_ = nullptr;
   }
 
   // Implement "Safe Bool Idiom"
@@ -246,7 +248,7 @@
   typedef T* WeakPtr::*Testable;
 
  public:
-  operator Testable() const { return get() ? &WeakPtr::ptr_ : NULL; }
+  operator Testable() const { return get() ? &WeakPtr::ptr_ : nullptr; }
 
  private:
   // Explicitly declare comparison operators as required by the "Safe Bool
@@ -265,7 +267,7 @@
   }
 
   // This pointer is only valid when ref_.is_valid() is true.  Otherwise, its
-  // value is undefined (as opposed to NULL).
+  // value is undefined (as opposed to nullptr).
   T* ptr_;
 };
 
@@ -280,9 +282,7 @@
   explicit WeakPtrFactory(T* ptr) : ptr_(ptr) {
   }
 
-  ~WeakPtrFactory() {
-    ptr_ = NULL;
-  }
+  ~WeakPtrFactory() { ptr_ = nullptr; }
 
   WeakPtr<T> GetWeakPtr() {
     DCHECK(ptr_);
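
The new WeakPtr(std::nullptr_t) constructor lets callers pass a literal nullptr wherever a WeakPtr<T> is expected, matching the PassThru() test added to weak_ptr_unittest.cc below. A minimal sketch (TakesWeak() and Example() are illustrative):

  #include "base/memory/weak_ptr.h"

  void TakesWeak(base::WeakPtr<int> ptr) {
    // ptr.get() is nullptr here when the caller passed a literal nullptr.
  }

  void Example() {
    TakesWeak(nullptr);  // Converts implicitly via WeakPtr(std::nullptr_t).
  }
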
diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc
index d4fb969..df6c24f 100644
--- a/base/memory/weak_ptr_unittest.cc
+++ b/base/memory/weak_ptr_unittest.cc
@@ -4,11 +4,12 @@
 
 #include "base/memory/weak_ptr.h"
 
+#include <memory>
 #include <string>
 
 #include "base/bind.h"
+#include "base/debug/leak_annotations.h"
 #include "base/location.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/threading/thread.h"
@@ -17,6 +18,10 @@
 namespace base {
 namespace {
 
+WeakPtr<int> PassThru(WeakPtr<int> ptr) {
+  return ptr;
+}
+
 template <class T>
 class OffThreadObjectCreator {
  public:
@@ -113,7 +118,7 @@
 
   Target* DeRef(const Arrow* arrow) {
     WaitableEvent completion(true, false);
-    Target* result = NULL;
+    Target* result = nullptr;
     task_runner()->PostTask(FROM_HERE, base::Bind(&BackgroundThread::DoDeRef,
                                                   arrow, &result, &completion));
     completion.Wait();
@@ -193,13 +198,13 @@
 
 TEST(WeakPtrFactoryTest, OutOfScope) {
   WeakPtr<int> ptr;
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
   {
     int data;
     WeakPtrFactory<int> factory(&data);
     ptr = factory.GetWeakPtr();
   }
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
 }
 
 TEST(WeakPtrFactoryTest, Multiple) {
@@ -212,8 +217,8 @@
     EXPECT_EQ(&data, a.get());
     EXPECT_EQ(&data, b.get());
   }
-  EXPECT_EQ(NULL, a.get());
-  EXPECT_EQ(NULL, b.get());
+  EXPECT_EQ(nullptr, a.get());
+  EXPECT_EQ(nullptr, b.get());
 }
 
 TEST(WeakPtrFactoryTest, MultipleStaged) {
@@ -225,9 +230,9 @@
     {
       WeakPtr<int> b = factory.GetWeakPtr();
     }
-    EXPECT_TRUE(NULL != a.get());
+    EXPECT_NE(nullptr, a.get());
   }
-  EXPECT_EQ(NULL, a.get());
+  EXPECT_EQ(nullptr, a.get());
 }
 
 TEST(WeakPtrFactoryTest, Dereference) {
@@ -248,6 +253,11 @@
   EXPECT_EQ(ptr.get(), &data);
 }
 
+TEST(WeakPtrTest, ConstructFromNullptr) {
+  WeakPtr<int> ptr = PassThru(nullptr);
+  EXPECT_EQ(nullptr, ptr.get());
+}
+
 TEST(WeakPtrTest, SupportsWeakPtr) {
   Target target;
   WeakPtr<Target> ptr = target.AsWeakPtr();
@@ -298,7 +308,7 @@
   EXPECT_EQ(&data, ptr.get());
   EXPECT_TRUE(factory.HasWeakPtrs());
   factory.InvalidateWeakPtrs();
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
   EXPECT_FALSE(factory.HasWeakPtrs());
 
   // Test that the factory can create new weak pointers after a
@@ -308,7 +318,7 @@
   EXPECT_EQ(&data, ptr2.get());
   EXPECT_TRUE(factory.HasWeakPtrs());
   factory.InvalidateWeakPtrs();
-  EXPECT_EQ(NULL, ptr2.get());
+  EXPECT_EQ(nullptr, ptr2.get());
   EXPECT_FALSE(factory.HasWeakPtrs());
 }
 
@@ -326,7 +336,7 @@
   // Test that it is OK to create an object that supports WeakPtr on one thread,
   // but use it on another.  This tests that we do not trip runtime checks that
   // ensure that a WeakPtr is not used by multiple threads.
-  scoped_ptr<Target> target(OffThreadObjectCreator<Target>::NewObject());
+  std::unique_ptr<Target> target(OffThreadObjectCreator<Target>::NewObject());
   WeakPtr<Target> weak_ptr = target->AsWeakPtr();
   EXPECT_EQ(target.get(), weak_ptr.get());
 }
@@ -335,7 +345,7 @@
   // Test that it is OK to create an object that has a WeakPtr member on one
   // thread, but use it on another.  This tests that we do not trip runtime
   // checks that ensure that a WeakPtr is not used by multiple threads.
-  scoped_ptr<Arrow> arrow(OffThreadObjectCreator<Arrow>::NewObject());
+  std::unique_ptr<Arrow> arrow(OffThreadObjectCreator<Arrow>::NewObject());
   Target target;
   arrow->target = target.AsWeakPtr();
   EXPECT_EQ(&target, arrow->target.get());
@@ -408,14 +418,14 @@
   background.Start();
 
   Arrow arrow;
-  scoped_ptr<TargetWithFactory> target(new TargetWithFactory);
+  std::unique_ptr<TargetWithFactory> target(new TargetWithFactory);
 
   // Bind to main thread.
   arrow.target = target->factory.GetWeakPtr();
   EXPECT_EQ(target.get(), arrow.target.get());
 
   target->factory.InvalidateWeakPtrs();
-  EXPECT_EQ(NULL, arrow.target.get());
+  EXPECT_EQ(nullptr, arrow.target.get());
 
   arrow.target = target->factory.GetWeakPtr();
   // Re-bind to background thread.
@@ -478,7 +488,7 @@
     arrow.target = target.AsWeakPtr();
     background.CreateArrowFromArrow(&arrow_copy, &arrow);
   }
-  EXPECT_EQ(NULL, arrow_copy->target.get());
+  EXPECT_EQ(nullptr, arrow_copy->target.get());
   background.DeleteArrow(arrow_copy);
 }
 
@@ -578,7 +588,7 @@
   // (introduces deadlock on Linux).
   ::testing::FLAGS_gtest_death_test_style = "threadsafe";
 
-  scoped_ptr<Target> target(new Target());
+  std::unique_ptr<Target> target(new Target());
 
   // Main thread creates an arrow referencing the Target.
   Arrow arrow;
@@ -602,7 +612,7 @@
   // (introduces deadlock on Linux).
   ::testing::FLAGS_gtest_death_test_style = "threadsafe";
 
-  scoped_ptr<Target> target(new Target());
+  std::unique_ptr<Target> target(new Target());
 
   // Main thread creates an arrow referencing the Target, and references it, so
   // that it becomes bound to the thread.
@@ -621,7 +631,7 @@
   // (introduces deadlock on Linux).
   ::testing::FLAGS_gtest_death_test_style = "threadsafe";
 
-  scoped_ptr<Target> target(new Target());
+  std::unique_ptr<Target> target(new Target());
 
   // Main thread creates an arrow referencing the Target.
   Arrow arrow;
diff --git a/base/message_loop/incoming_task_queue.cc b/base/message_loop/incoming_task_queue.cc
index 9e3cdc9..97df54f 100644
--- a/base/message_loop/incoming_task_queue.cc
+++ b/base/message_loop/incoming_task_queue.cc
@@ -8,7 +8,6 @@
 
 #include "base/location.h"
 #include "base/message_loop/message_loop.h"
-#include "base/metrics/histogram.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
@@ -18,7 +17,7 @@
 
 namespace {
 
-#ifndef NDEBUG
+#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
 // Delays larger than this are often bogus, and a warning should be emitted in
 // debug builds to warn developers.  http://crbug.com/450045
 const int kTaskDelayWarningThresholdInSeconds =
@@ -27,17 +26,26 @@
 
 // Returns true if MessagePump::ScheduleWork() must be called one
 // time for every task that is added to the MessageLoop incoming queue.
-#if defined(OS_ANDROID)
 bool AlwaysNotifyPump(MessageLoop::Type type) {
+#if defined(OS_ANDROID)
   // The Android UI message loop needs to get notified each time a task is
-  // added to the incoming queue.
+  // added
+  // to the incoming queue.
   return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
-}
 #else
-bool AlwaysNotifyPump(MessageLoop::Type /* type */) {
+  (void)type;  // Avoid an unused-parameter warning.
   return false;
-}
 #endif
+}
+
+TimeTicks CalculateDelayedRuntime(TimeDelta delay) {
+  TimeTicks delayed_run_time;
+  if (delay > TimeDelta())
+    delayed_run_time = TimeTicks::Now() + delay;
+  else
+    DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";
+  return delayed_run_time;
+}
 
 }  // namespace
 
@@ -60,9 +68,9 @@
       << "Requesting super-long task delay period of " << delay.InSeconds()
       << " seconds from here: " << from_here.ToString();
 
-  AutoLock locked(incoming_queue_lock_);
   PendingTask pending_task(
-      from_here, task, CalculateDelayedRuntime(delay), nestable);
+    from_here, task, CalculateDelayedRuntime(delay), nestable);
+  AutoLock locked(incoming_queue_lock_);
 #if defined(OS_WIN)
   // We consider the task needs a high resolution timer if the delay is
   // more than 0 and less than 32ms. This caps the relative error to
@@ -126,15 +134,6 @@
   DCHECK(!message_loop_);
 }
 
-TimeTicks IncomingTaskQueue::CalculateDelayedRuntime(TimeDelta delay) {
-  TimeTicks delayed_run_time;
-  if (delay > TimeDelta())
-    delayed_run_time = TimeTicks::Now() + delay;
-  else
-    DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";
-  return delayed_run_time;
-}
-
 bool IncomingTaskQueue::PostPendingTask(PendingTask* pending_task) {
   // Warning: Don't try to short-circuit, and handle this thread's tasks more
   // directly, as it could starve handling of foreign threads.  Put every task
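
CalculateDelayedRuntime() is now a file-local helper computed before |incoming_queue_lock_| is taken, rather than a private member of IncomingTaskQueue. A sketch of the equivalent computation in plain base/time terms (DelayedRunTime() is an illustration, not the Chromium helper itself):

  #include "base/time/time.h"

  // A zero delay yields a null TimeTicks, which the message loop treats as
  // "run as soon as possible"; a positive delay becomes an absolute deadline.
  base::TimeTicks DelayedRunTime(base::TimeDelta delay) {
    if (delay > base::TimeDelta())
      return base::TimeTicks::Now() + delay;
    return base::TimeTicks();
  }
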
diff --git a/base/message_loop/incoming_task_queue.h b/base/message_loop/incoming_task_queue.h
index e450aa1..608eca0 100644
--- a/base/message_loop/incoming_task_queue.h
+++ b/base/message_loop/incoming_task_queue.h
@@ -62,9 +62,6 @@
   friend class RefCountedThreadSafe<IncomingTaskQueue>;
   virtual ~IncomingTaskQueue();
 
-  // Calculates the time at which a PendingTask should run.
-  TimeTicks CalculateDelayedRuntime(TimeDelta delay);
-
   // Adds a task to |incoming_queue_|. The caller retains ownership of
   // |pending_task|, but this function will reset the value of
   // |pending_task->task|. This is needed to ensure that the posting call stack
diff --git a/base/message_loop/message_loop.cc b/base/message_loop/message_loop.cc
index eeed76a..a86e8e8 100644
--- a/base/message_loop/message_loop.cc
+++ b/base/message_loop/message_loop.cc
@@ -5,19 +5,21 @@
 #include "base/message_loop/message_loop.h"
 
 #include <algorithm>
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
 #include "base/compiler_specific.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/message_loop/message_pump_default.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/run_loop.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/thread_local.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
 #include "base/trace_event/trace_event.h"
 #include "base/tracked_objects.h"
@@ -102,7 +104,7 @@
 }
 #endif  // !defined(OS_NACL_SFI)
 
-scoped_ptr<MessagePump> ReturnPump(scoped_ptr<MessagePump> pump) {
+std::unique_ptr<MessagePump> ReturnPump(std::unique_ptr<MessagePump> pump) {
   return pump;
 }
 
@@ -119,6 +121,8 @@
 MessageLoop::DestructionObserver::~DestructionObserver() {
 }
 
+MessageLoop::NestingObserver::~NestingObserver() {}
+
 //------------------------------------------------------------------------------
 
 MessageLoop::MessageLoop(Type type)
@@ -126,7 +130,7 @@
   BindToCurrentThread();
 }
 
-MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
+MessageLoop::MessageLoop(std::unique_ptr<MessagePump> pump)
     : MessageLoop(TYPE_CUSTOM, Bind(&ReturnPump, Passed(&pump))) {
   BindToCurrentThread();
 }
@@ -205,7 +209,7 @@
 }
 
 // static
-scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
+std::unique_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
 // TODO(rvargas): Get rid of the OS guards.
 #if defined(USE_GLIB) && !defined(OS_NACL)
   typedef MessagePumpGlib MessagePumpForUI;
@@ -214,21 +218,22 @@
 #endif
 
 #if defined(OS_IOS) || defined(OS_MACOSX)
-#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(MessagePumpMac::Create())
 #elif defined(OS_NACL)
 // Currently NaCl doesn't have a UI MessageLoop.
 // TODO(abarth): Figure out if we need this.
-#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>()
 #else
-#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
+#define MESSAGE_PUMP_UI std::unique_ptr<MessagePump>(new MessagePumpForUI())
 #endif
 
 #if defined(OS_MACOSX)
   // Use an OS native runloop on Mac to support timer coalescing.
-  #define MESSAGE_PUMP_DEFAULT \
-      scoped_ptr<MessagePump>(new MessagePumpCFRunLoop())
+#define MESSAGE_PUMP_DEFAULT \
+  std::unique_ptr<MessagePump>(new MessagePumpCFRunLoop())
 #else
-  #define MESSAGE_PUMP_DEFAULT scoped_ptr<MessagePump>(new MessagePumpDefault())
+#define MESSAGE_PUMP_DEFAULT \
+  std::unique_ptr<MessagePump>(new MessagePumpDefault())
 #endif
 
   if (type == MessageLoop::TYPE_UI) {
@@ -237,11 +242,11 @@
     return MESSAGE_PUMP_UI;
   }
   if (type == MessageLoop::TYPE_IO)
-    return scoped_ptr<MessagePump>(new MessagePumpForIO());
+    return std::unique_ptr<MessagePump>(new MessagePumpForIO());
 
 #if defined(OS_ANDROID)
   if (type == MessageLoop::TYPE_JAVA)
-    return scoped_ptr<MessagePump>(new MessagePumpForUI());
+    return std::unique_ptr<MessagePump>(new MessagePumpForUI());
 #endif
 
   DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
@@ -260,6 +265,16 @@
   destruction_observers_.RemoveObserver(destruction_observer);
 }
 
+void MessageLoop::AddNestingObserver(NestingObserver* observer) {
+  DCHECK_EQ(this, current());
+  nesting_observers_.AddObserver(observer);
+}
+
+void MessageLoop::RemoveNestingObserver(NestingObserver* observer) {
+  DCHECK_EQ(this, current());
+  nesting_observers_.RemoveObserver(observer);
+}
+
 void MessageLoop::PostTask(
     const tracked_objects::Location& from_here,
     const Closure& task) {
@@ -273,19 +288,6 @@
   task_runner_->PostDelayedTask(from_here, task, delay);
 }
 
-void MessageLoop::PostNonNestableTask(
-    const tracked_objects::Location& from_here,
-    const Closure& task) {
-  task_runner_->PostNonNestableTask(from_here, task);
-}
-
-void MessageLoop::PostNonNestableDelayedTask(
-    const tracked_objects::Location& from_here,
-    const Closure& task,
-    TimeDelta delay) {
-  task_runner_->PostNonNestableDelayedTask(from_here, task, delay);
-}
-
 void MessageLoop::Run() {
   DCHECK(pump_);
   RunLoop run_loop;
@@ -374,9 +376,10 @@
 //------------------------------------------------------------------------------
 
 // static
-scoped_ptr<MessageLoop> MessageLoop::CreateUnbound(
-    Type type, MessagePumpFactoryCallback pump_factory) {
-  return make_scoped_ptr(new MessageLoop(type, pump_factory));
+std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(
+    Type type,
+    MessagePumpFactoryCallback pump_factory) {
+  return WrapUnique(new MessageLoop(type, pump_factory));
 }
 
 MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
@@ -572,6 +575,11 @@
 #endif
 }
 
+void MessageLoop::NotifyBeginNestedLoop() {
+  FOR_EACH_OBSERVER(NestingObserver, nesting_observers_,
+                    OnBeginNestedMessageLoop());
+}
+
 bool MessageLoop::DoWork() {
   if (!nestable_tasks_allowed_) {
     // Task can't be executed right now.
@@ -659,23 +667,22 @@
 void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                      void(*deleter)(const void*),
                                      const void* object) {
-  PostNonNestableTask(from_here, Bind(deleter, object));
+  task_runner()->PostNonNestableTask(from_here, Bind(deleter, object));
 }
 
 void MessageLoop::ReleaseSoonInternal(
     const tracked_objects::Location& from_here,
     void(*releaser)(const void*),
     const void* object) {
-  PostNonNestableTask(from_here, Bind(releaser, object));
+  task_runner()->PostNonNestableTask(from_here, Bind(releaser, object));
 }
 
 #if !defined(OS_NACL)
 //------------------------------------------------------------------------------
 // MessageLoopForUI
 
-MessageLoopForUI::MessageLoopForUI(scoped_ptr<MessagePump> pump)
-    : MessageLoop(TYPE_UI, Bind(&ReturnPump, Passed(&pump))) {
-}
+MessageLoopForUI::MessageLoopForUI(std::unique_ptr<MessagePump> pump)
+    : MessageLoop(TYPE_UI, Bind(&ReturnPump, Passed(&pump))) {}
 
 #if defined(OS_ANDROID)
 void MessageLoopForUI::Start() {
@@ -714,15 +721,6 @@
 MessageLoopForIO::MessageLoopForIO() : MessageLoop(TYPE_IO) {}
 
 #if !defined(OS_NACL_SFI)
-void MessageLoopForIO::AddIOObserver(
-    MessageLoopForIO::IOObserver* io_observer) {
-  ToPumpIO(pump_.get())->AddIOObserver(io_observer);
-}
-
-void MessageLoopForIO::RemoveIOObserver(
-    MessageLoopForIO::IOObserver* io_observer) {
-  ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
-}
 
 #if defined(OS_WIN)
 void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
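
Callers that previously used MessageLoop::PostNonNestableTask() or
PostNonNestableDelayedTask() now go through the loop's task runner, which is
what DeleteSoonInternal()/ReleaseSoonInternal() switch to above. A minimal
migration sketch (not part of this patch; DoWork is a placeholder):

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/message_loop/message_loop.h"
    #include "base/time/time.h"

    void DoWork() {}

    void PostNonNestableExample(base::MessageLoop* loop) {
      // Before: loop->PostNonNestableTask(FROM_HERE, base::Bind(&DoWork));
      loop->task_runner()->PostNonNestableTask(FROM_HERE, base::Bind(&DoWork));
      // Before: loop->PostNonNestableDelayedTask(FROM_HERE, ..., delay);
      loop->task_runner()->PostNonNestableDelayedTask(
          FROM_HERE, base::Bind(&DoWork),
          base::TimeDelta::FromMilliseconds(10));
    }
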
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index c569aae..1e8b0bb 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -5,6 +5,7 @@
 #ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
 #define BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
 
+#include <memory>
 #include <queue>
 #include <string>
 
@@ -15,7 +16,6 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/incoming_task_queue.h"
 #include "base/message_loop/message_loop_task_runner.h"
 #include "base/message_loop/message_pump.h"
@@ -115,7 +115,7 @@
   explicit MessageLoop(Type type = TYPE_DEFAULT);
   // Creates a TYPE_CUSTOM MessageLoop with the supplied MessagePump, which must
   // be non-NULL.
-  explicit MessageLoop(scoped_ptr<MessagePump> pump);
+  explicit MessageLoop(std::unique_ptr<MessagePump> pump);
 
   ~MessageLoop() override;
 
@@ -124,7 +124,7 @@
 
   static void EnableHistogrammer(bool enable_histogrammer);
 
-  typedef scoped_ptr<MessagePump> (MessagePumpFactory)();
+  typedef std::unique_ptr<MessagePump>(MessagePumpFactory)();
   // Uses the given base::MessagePumpForUIFactory to override the default
   // MessagePump implementation for 'TYPE_UI'. Returns true if the factory
   // was successfully registered.
@@ -132,7 +132,8 @@
 
   // Creates the default MessagePump based on |type|. Caller owns return
   // value.
-  static scoped_ptr<MessagePump> CreateMessagePumpForType(Type type);
+  static std::unique_ptr<MessagePump> CreateMessagePumpForType(Type type);
+
   // A DestructionObserver is notified when the current MessageLoop is being
   // destroyed.  These observers are notified prior to MessageLoop::current()
   // being changed to return NULL.  This gives interested parties the chance to
@@ -157,6 +158,19 @@
   // DestructionObserver is receiving a notification callback.
   void RemoveDestructionObserver(DestructionObserver* destruction_observer);
 
+  // A NestingObserver is notified when a nested message loop begins. The
+  // observers are notified before the first task is processed.
+  class BASE_EXPORT NestingObserver {
+   public:
+    virtual void OnBeginNestedMessageLoop() = 0;
+
+   protected:
+    virtual ~NestingObserver();
+  };
+
+  void AddNestingObserver(NestingObserver* observer);
+  void RemoveNestingObserver(NestingObserver* observer);
+
   // NOTE: Deprecated; prefer task_runner() and the TaskRunner interfaces.
   // TODO(skyostil): Remove these functions (crbug.com/465354).
   //
@@ -186,13 +200,6 @@
                        const Closure& task,
                        TimeDelta delay);
 
-  void PostNonNestableTask(const tracked_objects::Location& from_here,
-                           const Closure& task);
-
-  void PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
-                                  const Closure& task,
-                                  TimeDelta delay);
-
   // A variant on PostTask that deletes the given object.  This is useful
   // if the object needs to live until the next run of the MessageLoop (for
   // example, deleting a RenderProcessHost from within an IPC callback is not
@@ -397,9 +404,9 @@
 
   //----------------------------------------------------------------------------
  protected:
-  scoped_ptr<MessagePump> pump_;
+  std::unique_ptr<MessagePump> pump_;
 
-  using MessagePumpFactoryCallback = Callback<scoped_ptr<MessagePump>()>;
+  using MessagePumpFactoryCallback = Callback<std::unique_ptr<MessagePump>()>;
 
   // Common protected constructor. Other constructors delegate the
   // initialization to this constructor.
@@ -430,7 +437,7 @@
   // thread the message loop runs on, before calling Run().
   // Before BindToCurrentThread() is called, only Post*Task() functions can
   // be called on the message loop.
-  static scoped_ptr<MessageLoop> CreateUnbound(
+  static std::unique_ptr<MessageLoop> CreateUnbound(
       Type type,
       MessagePumpFactoryCallback pump_factory);
 
@@ -473,6 +480,9 @@
   // If message_histogram_ is NULL, this is a no-op.
   void HistogramEvent(int event);
 
+  // Notify observers that a nested message loop is starting.
+  void NotifyBeginNestedLoop();
+
   // MessagePump::Delegate methods:
   bool DoWork() override;
   bool DoDelayedWork(TimeTicks* next_delayed_work_time) override;
@@ -507,6 +517,8 @@
 
   ObserverList<DestructionObserver> destruction_observers_;
 
+  ObserverList<NestingObserver> nesting_observers_;
+
   // A recursion block that prevents accidentally running additional tasks when
   // inside a (accidentally induced?) nested message pump.
   bool nestable_tasks_allowed_;
@@ -538,7 +550,7 @@
 
   // The task runner associated with this message loop.
   scoped_refptr<SingleThreadTaskRunner> task_runner_;
-  scoped_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
+  std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
 
   template <class T, class R> friend class base::subtle::DeleteHelperInternal;
   template <class T, class R> friend class base::subtle::ReleaseHelperInternal;
@@ -567,7 +579,7 @@
   MessageLoopForUI() : MessageLoop(TYPE_UI) {
   }
 
-  explicit MessageLoopForUI(scoped_ptr<MessagePump> pump);
+  explicit MessageLoopForUI(std::unique_ptr<MessagePump> pump);
 
   // Returns the MessageLoopForUI of the current thread.
   static MessageLoopForUI* current() {
@@ -646,12 +658,10 @@
 #if defined(OS_WIN)
   typedef MessagePumpForIO::IOHandler IOHandler;
   typedef MessagePumpForIO::IOContext IOContext;
-  typedef MessagePumpForIO::IOObserver IOObserver;
 #elif defined(OS_IOS)
   typedef MessagePumpIOSForIO::Watcher Watcher;
   typedef MessagePumpIOSForIO::FileDescriptorWatcher
       FileDescriptorWatcher;
-  typedef MessagePumpIOSForIO::IOObserver IOObserver;
 
   enum Mode {
     WATCH_READ = MessagePumpIOSForIO::WATCH_READ,
@@ -662,7 +672,6 @@
   typedef MessagePumpLibevent::Watcher Watcher;
   typedef MessagePumpLibevent::FileDescriptorWatcher
       FileDescriptorWatcher;
-  typedef MessagePumpLibevent::IOObserver IOObserver;
 
   enum Mode {
     WATCH_READ = MessagePumpLibevent::WATCH_READ,
@@ -671,9 +680,6 @@
   };
 #endif
 
-  void AddIOObserver(IOObserver* io_observer);
-  void RemoveIOObserver(IOObserver* io_observer);
-
 #if defined(OS_WIN)
   // Please see MessagePumpWin for definitions of these methods.
   void RegisterIOHandler(HANDLE file, IOHandler* handler);
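
With the MessagePumpFactory typedef and CreateMessagePumpForType() now
returning std::unique_ptr<MessagePump>, a loop built around a custom pump
takes ownership via std::move(). A short sketch of the post-change usage
(illustrative only; the quit task is only there so Run() returns):

    #include <memory>
    #include <utility>

    #include "base/message_loop/message_loop.h"

    // Matches the new MessagePumpFactory shape: returns a unique_ptr.
    std::unique_ptr<base::MessagePump> MakeIOPump() {
      return base::MessageLoop::CreateMessagePumpForType(
          base::MessageLoop::TYPE_IO);
    }

    void RunLoopWithCustomPump() {
      std::unique_ptr<base::MessagePump> pump = MakeIOPump();
      base::MessageLoop loop(std::move(pump));
      loop.task_runner()->PostTask(FROM_HERE, loop.QuitWhenIdleClosure());
      loop.Run();
    }
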
diff --git a/base/message_loop/message_loop_task_runner_unittest.cc b/base/message_loop/message_loop_task_runner_unittest.cc
index 0442e7c..044350a 100644
--- a/base/message_loop/message_loop_task_runner_unittest.cc
+++ b/base/message_loop/message_loop_task_runner_unittest.cc
@@ -4,14 +4,16 @@
 
 #include "base/message_loop/message_loop_task_runner.h"
 
+#include <memory>
+
 #include "base/atomic_sequence_num.h"
 #include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/debug/leak_annotations.h"
 #include "base/message_loop/message_loop.h"
 #include "base/message_loop/message_loop_task_runner.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/platform_test.h"
 
@@ -87,7 +89,7 @@
 
   static StaticAtomicSequenceNumber g_order;
 
-  scoped_ptr<MessageLoop> current_loop_;
+  std::unique_ptr<MessageLoop> current_loop_;
   Thread task_thread_;
 
  private:
@@ -197,6 +199,8 @@
 }
 
 TEST_F(MessageLoopTaskRunnerTest, PostTaskAndReply_DeadReplyLoopDoesNotDelete) {
+  // Annotate the scope as having memory leaks to suppress heapchecker reports.
+  ANNOTATE_SCOPED_MEMORY_LEAK;
   MessageLoop* task_run_on = NULL;
   MessageLoop* task_deleted_on = NULL;
   int task_delete_order = -1;
@@ -300,8 +304,8 @@
     MessageLoopTaskRunnerThreadingTest* test_;
   };
 
-  scoped_ptr<Thread> io_thread_;
-  scoped_ptr<Thread> file_thread_;
+  std::unique_ptr<Thread> io_thread_;
+  std::unique_ptr<Thread> file_thread_;
 
  private:
   mutable MessageLoop loop_;
@@ -327,7 +331,7 @@
 }
 
 TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadExits) {
-  scoped_ptr<Thread> test_thread(
+  std::unique_ptr<Thread> test_thread(
       new Thread("MessageLoopTaskRunnerThreadingTest_Dummy"));
   test_thread->Start();
   scoped_refptr<SingleThreadTaskRunner> task_runner =
@@ -342,7 +346,7 @@
 TEST_F(MessageLoopTaskRunnerThreadingTest, PostTaskAfterThreadIsDeleted) {
   scoped_refptr<SingleThreadTaskRunner> task_runner;
   {
-    scoped_ptr<Thread> test_thread(
+    std::unique_ptr<Thread> test_thread(
         new Thread("MessageLoopTaskRunnerThreadingTest_Dummy"));
     test_thread->Start();
     task_runner = test_thread->task_runner();
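
The threading tests above keep relying on the same contract: a base::Thread
exposes a SingleThreadTaskRunner that can outlive the Thread object, so
posting after the thread is gone fails gracefully rather than crashing. The
happy path, as a hedged sketch (SayHello is a placeholder):

    #include "base/bind.h"
    #include "base/location.h"
    #include "base/threading/thread.h"

    void SayHello() {}

    void PostToWorker() {
      base::Thread worker("ExampleWorker");
      if (!worker.Start())
        return;
      // task_runner() returns a scoped_refptr<SingleThreadTaskRunner> bound
      // to the worker's message loop.
      worker.task_runner()->PostTask(FROM_HERE, base::Bind(&SayHello));
      worker.Stop();  // Joins; queued tasks are allowed to run first.
    }
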
diff --git a/base/message_loop/message_loop_test.cc b/base/message_loop/message_loop_test.cc
index ac50d64..4e45acb 100644
--- a/base/message_loop/message_loop_test.cc
+++ b/base/message_loop/message_loop_test.cc
@@ -91,7 +91,7 @@
 }  // namespace
 
 void RunTest_PostTask(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
   // Add tests to message loop
   scoped_refptr<Foo> foo(new Foo());
@@ -121,7 +121,7 @@
 }
 
 void RunTest_PostDelayedTask_Basic(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that PostDelayedTask results in a delayed task.
@@ -144,7 +144,7 @@
 }
 
 void RunTest_PostDelayedTask_InDelayOrder(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that two tasks with different delays run in the right order.
@@ -169,7 +169,7 @@
 }
 
 void RunTest_PostDelayedTask_InPostOrder(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that two tasks with the same delay run in the order in which they
@@ -199,7 +199,7 @@
 }
 
 void RunTest_PostDelayedTask_InPostOrder_2(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that a delayed task still runs after a normal tasks even if the
@@ -226,7 +226,7 @@
 }
 
 void RunTest_PostDelayedTask_InPostOrder_3(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that a delayed task still runs after a pile of normal tasks.  The key
@@ -254,7 +254,7 @@
 }
 
 void RunTest_PostDelayedTask_SharedTimer(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   // Test that the interval of the timer, used to run the next delayed task, is
@@ -321,7 +321,7 @@
   bool a_was_deleted = false;
   bool b_was_deleted = false;
   {
-    scoped_ptr<MessagePump> pump(factory());
+    std::unique_ptr<MessagePump> pump(factory());
     MessageLoop loop(std::move(pump));
     loop.PostTask(
         FROM_HERE, Bind(&RecordDeletionProbe::Run,
@@ -341,7 +341,7 @@
   bool b_was_deleted = false;
   bool c_was_deleted = false;
   {
-    scoped_ptr<MessagePump> pump(factory());
+    std::unique_ptr<MessagePump> pump(factory());
     MessageLoop loop(std::move(pump));
     // The scoped_refptr for each of the below is held either by the chained
     // RecordDeletionProbe, or the bound RecordDeletionProbe::Run() callback.
@@ -368,7 +368,7 @@
 }
 
 void RunTest_Nesting(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   int depth = 100;
@@ -378,6 +378,66 @@
   EXPECT_EQ(depth, 0);
 }
 
+// A NestingObserver that tracks the number of nested message loop starts it
+// has seen.
+class TestNestingObserver : public MessageLoop::NestingObserver {
+ public:
+  TestNestingObserver() {}
+  ~TestNestingObserver() override {}
+
+  int begin_nested_loop_count() const { return begin_nested_loop_count_; }
+
+  // MessageLoop::NestingObserver:
+  void OnBeginNestedMessageLoop() override { begin_nested_loop_count_++; }
+
+ private:
+  int begin_nested_loop_count_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(TestNestingObserver);
+};
+
+void ExpectOneBeginNestedLoop(TestNestingObserver* observer) {
+  EXPECT_EQ(1, observer->begin_nested_loop_count());
+}
+
+// Starts a nested message loop.
+void RunNestedLoop(TestNestingObserver* observer,
+                   const Closure& quit_outer_loop) {
+  // The nested loop hasn't started yet.
+  EXPECT_EQ(0, observer->begin_nested_loop_count());
+
+  MessageLoop::ScopedNestableTaskAllower allow(MessageLoop::current());
+  RunLoop nested_loop;
+  // Verify that by the time the first task is run the observer has seen the
+  // message loop begin.
+  MessageLoop::current()->PostTask(FROM_HERE,
+                                   Bind(&ExpectOneBeginNestedLoop, observer));
+  MessageLoop::current()->PostTask(FROM_HERE, nested_loop.QuitClosure());
+  nested_loop.Run();
+
+  // Quitting message loops doesn't change the begin count.
+  EXPECT_EQ(1, observer->begin_nested_loop_count());
+
+  quit_outer_loop.Run();
+}
+
+// Tests that a NestingObserver is notified when a nested message loop begins.
+void RunTest_NestingObserver(MessagePumpFactory factory) {
+  std::unique_ptr<MessagePump> pump(factory());
+  MessageLoop outer_loop(std::move(pump));
+
+  // Observe the outer loop for nested message loops beginning.
+  TestNestingObserver nesting_observer;
+  outer_loop.AddNestingObserver(&nesting_observer);
+
+  // Post a task that runs a nested message loop.
+  outer_loop.PostTask(FROM_HERE, Bind(&RunNestedLoop, &nesting_observer,
+                                      outer_loop.QuitWhenIdleClosure()));
+  outer_loop.Run();
+
+  outer_loop.RemoveNestingObserver(&nesting_observer);
+}
+
 enum TaskType {
   MESSAGEBOX,
   ENDDIALOG,
@@ -476,7 +536,7 @@
   order->RecordEnd(QUITMESSAGELOOP, cookie);
 }
 void RunTest_RecursiveDenial1(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
@@ -523,7 +583,7 @@
 }
 
 void RunTest_RecursiveDenial3(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   EXPECT_TRUE(MessageLoop::current()->NestableTasksAllowed());
@@ -564,7 +624,7 @@
 }
 
 void RunTest_RecursiveSupport1(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -597,18 +657,19 @@
 
 // Tests that non nestable tasks run in FIFO if there are no nested loops.
 void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
 
-  MessageLoop::current()->PostNonNestableTask(
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
       FROM_HERE,
       Bind(&OrderedFunc, &order, 1));
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&OrderedFunc, &order, 2));
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&QuitFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(FROM_HERE,
+                                                  Bind(&QuitFunc, &order, 3));
   MessageLoop::current()->Run();
 
   // FIFO order.
@@ -637,43 +698,30 @@
 }
 
 // Tests that non nestable tasks don't run when there's code in the call stack.
-void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory,
-                                     bool use_delayed) {
-  scoped_ptr<MessagePump> pump(factory());
+void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory) {
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
 
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE,
       Bind(&FuncThatPumps, &order, 1));
-  if (use_delayed) {
-    MessageLoop::current()->PostNonNestableDelayedTask(
-        FROM_HERE,
-        Bind(&OrderedFunc, &order, 2),
-        TimeDelta::FromMilliseconds(1));
-  } else {
-    MessageLoop::current()->PostNonNestableTask(
-        FROM_HERE,
-        Bind(&OrderedFunc, &order, 2));
-  }
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&OrderedFunc, &order, 3));
-  MessageLoop::current()->PostTask(
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 2));
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 3));
+  MessageLoop::current()->task_runner()->PostTask(
       FROM_HERE,
       Bind(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
-  MessageLoop::current()->PostTask(FROM_HERE,
-                                   Bind(&OrderedFunc, &order, 5));
-  if (use_delayed) {
-    MessageLoop::current()->PostNonNestableDelayedTask(
-        FROM_HERE,
-        Bind(&QuitFunc, &order, 6),
-        TimeDelta::FromMilliseconds(2));
-  } else {
-    MessageLoop::current()->PostNonNestableTask(
-        FROM_HERE,
-        Bind(&QuitFunc, &order, 6));
-  }
+  MessageLoop::current()->task_runner()->PostTask(
+      FROM_HERE,
+      Bind(&OrderedFunc, &order, 5));
+  MessageLoop::current()->task_runner()->PostNonNestableTask(
+      FROM_HERE,
+      Bind(&QuitFunc, &order, 6));
 
   MessageLoop::current()->Run();
 
@@ -707,7 +755,7 @@
 }
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_QuitNow(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -742,7 +790,7 @@
 
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_RunLoopQuitTop(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -772,7 +820,7 @@
 
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_RunLoopQuitNested(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -802,7 +850,7 @@
 
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_RunLoopQuitBogus(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -835,7 +883,7 @@
 
 // Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
 void RunTest_RunLoopQuitDeep(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -904,7 +952,7 @@
 
 // Tests RunLoopQuit works before RunWithID.
 void RunTest_RunLoopQuitOrderBefore(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -925,7 +973,7 @@
 
 // Tests RunLoopQuit works during RunWithID.
 void RunTest_RunLoopQuitOrderDuring(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -952,7 +1000,7 @@
 
 // Tests RunLoopQuit works after RunWithID.
 void RunTest_RunLoopQuitOrderAfter(MessagePumpFactory factory) {
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
 
   TaskList order;
@@ -1010,7 +1058,7 @@
 // times to reproduce the bug.
 void RunTest_RecursivePosts(MessagePumpFactory factory) {
   const int kNumTimes = 1 << 17;
-  scoped_ptr<MessagePump> pump(factory());
+  std::unique_ptr<MessagePump> pump(factory());
   MessageLoop loop(std::move(pump));
   loop.PostTask(FROM_HERE, Bind(&PostNTasksThenQuit, kNumTimes));
   loop.Run();
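
Several of the reworked tests lean on the RunLoop + QuitClosure() idiom
rather than quitting the MessageLoop directly. Outside the test harness the
pattern looks roughly like this (a sketch; DoStep is a placeholder):

    #include "base/bind.h"
    #include "base/message_loop/message_loop.h"
    #include "base/run_loop.h"

    void DoStep() {}

    void RunOneStep() {
      base::MessageLoop loop;
      base::RunLoop run_loop;
      loop.task_runner()->PostTask(FROM_HERE, base::Bind(&DoStep));
      // Quits this RunLoop only, not any outer Run() that may be active.
      loop.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
      run_loop.Run();
    }
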
diff --git a/base/message_loop/message_loop_test.h b/base/message_loop/message_loop_test.h
index 3d9889c..b7ae28e 100644
--- a/base/message_loop/message_loop_test.h
+++ b/base/message_loop/message_loop_test.h
@@ -28,12 +28,12 @@
 void RunTest_EnsureDeletion(MessagePumpFactory factory);
 void RunTest_EnsureDeletion_Chain(MessagePumpFactory factory);
 void RunTest_Nesting(MessagePumpFactory factory);
+void RunTest_NestingObserver(MessagePumpFactory factory);
 void RunTest_RecursiveDenial1(MessagePumpFactory factory);
 void RunTest_RecursiveDenial3(MessagePumpFactory factory);
 void RunTest_RecursiveSupport1(MessagePumpFactory factory);
 void RunTest_NonNestableWithNoNesting(MessagePumpFactory factory);
-void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory,
-                                     bool use_delayed);
+void RunTest_NonNestableInNestedLoop(MessagePumpFactory factory);
 void RunTest_QuitNow(MessagePumpFactory factory);
 void RunTest_RunLoopQuitTop(MessagePumpFactory factory);
 void RunTest_RunLoopQuitNested(MessagePumpFactory factory);
@@ -96,11 +96,8 @@
   TEST(MessageLoopTestType##id, NonNestableWithNoNesting) { \
     base::test::RunTest_NonNestableWithNoNesting(factory); \
   } \
-  TEST(MessageLoopTestType##id, NonNestableInNestedLoop) { \
-    base::test::RunTest_NonNestableInNestedLoop(factory, false); \
-  } \
   TEST(MessageLoopTestType##id, NonNestableDelayedInNestedLoop) { \
-    base::test::RunTest_NonNestableInNestedLoop(factory, true); \
+    base::test::RunTest_NonNestableInNestedLoop(factory); \
   } \
   TEST(MessageLoopTestType##id, QuitNow) { \
     base::test::RunTest_QuitNow(factory); \
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index a06ba91..bc4176f 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -20,9 +20,9 @@
 #include "base/run_loop.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/test/test_simple_task_runner.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -30,6 +30,7 @@
 #include "base/message_loop/message_pump_win.h"
 #include "base/process/memory.h"
 #include "base/strings/string16.h"
+#include "base/win/current_module.h"
 #include "base/win/scoped_handle.h"
 #endif
 
@@ -40,15 +41,15 @@
 
 namespace {
 
-scoped_ptr<MessagePump> TypeDefaultMessagePumpFactory() {
+std::unique_ptr<MessagePump> TypeDefaultMessagePumpFactory() {
   return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_DEFAULT);
 }
 
-scoped_ptr<MessagePump> TypeIOMessagePumpFactory() {
+std::unique_ptr<MessagePump> TypeIOMessagePumpFactory() {
   return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_IO);
 }
 
-scoped_ptr<MessagePump> TypeUIMessagePumpFactory() {
+std::unique_ptr<MessagePump> TypeUIMessagePumpFactory() {
   return MessageLoop::CreateMessagePumpForType(MessageLoop::TYPE_UI);
 }
 
@@ -449,8 +450,6 @@
 TestIOHandler::TestIOHandler(const wchar_t* name, HANDLE signal, bool wait)
     : signal_(signal), wait_(wait) {
   memset(buffer_, 0, sizeof(buffer_));
-  memset(&context_, 0, sizeof(context_));
-  context_.handler = this;
 
   file_.Set(CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING,
                        FILE_FLAG_OVERLAPPED, NULL));
@@ -923,7 +922,7 @@
 
 TEST(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
   MessageLoop loop(MessageLoop::TYPE_UI);
-  HINSTANCE instance = GetModuleFromAddress(&TestWndProcThunk);
+  HINSTANCE instance = CURRENT_MODULE();
   WNDCLASSEX wc = {0};
   wc.cbSize = sizeof(wc);
   wc.lpfnWndProc = TestWndProcThunk;
@@ -970,7 +969,7 @@
   // It should be possible to delete an unbound message loop on a thread which
   // already has another active loop. This happens when thread creation fails.
   MessageLoop loop;
-  scoped_ptr<MessageLoop> unbound_loop(MessageLoop::CreateUnbound(
+  std::unique_ptr<MessageLoop> unbound_loop(MessageLoop::CreateUnbound(
       MessageLoop::TYPE_DEFAULT, MessageLoop::MessagePumpFactoryCallback()));
   unbound_loop.reset();
   EXPECT_EQ(&loop, MessageLoop::current());
diff --git a/base/message_loop/message_pump_glib.h b/base/message_loop/message_pump_glib.h
index 9f44571..a2b54d8 100644
--- a/base/message_loop/message_pump_glib.h
+++ b/base/message_loop/message_pump_glib.h
@@ -5,8 +5,10 @@
 #ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
 #define BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
 
+#include <memory>
+
 #include "base/base_export.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/macros.h"
 #include "base/message_loop/message_pump.h"
 #include "base/observer_list.h"
 #include "base/time/time.h"
@@ -68,7 +70,7 @@
   int wakeup_pipe_read_;
   int wakeup_pipe_write_;
   // Use a scoped_ptr to avoid needing the definition of GPollFD in the header.
-  scoped_ptr<GPollFD> wakeup_gpollfd_;
+  std::unique_ptr<GPollFD> wakeup_gpollfd_;
 
   DISALLOW_COPY_AND_ASSIGN(MessagePumpGlib);
 };
diff --git a/base/message_loop/message_pump_libevent.cc b/base/message_loop/message_pump_libevent.cc
index 72726a8..5aa5567 100644
--- a/base/message_loop/message_pump_libevent.cc
+++ b/base/message_loop/message_pump_libevent.cc
@@ -7,25 +7,19 @@
 #include <errno.h>
 #include <unistd.h>
 
+#include <memory>
+
 #include "base/auto_reset.h"
 #include "base/compiler_specific.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/observer_list.h"
 #include "base/posix/eintr_wrapper.h"
+#include "base/third_party/libevent/event.h"
 #include "base/time/time.h"
 #include "base/trace_event/trace_event.h"
 #include "build/build_config.h"
 
-#if defined(__ANDROID__) || defined(__ANDROID_HOST__)
-#include <event2/event.h>
-#include <event2/event_compat.h>
-#include <event2/event_struct.h>
-#else
-#include "third_party/libevent/event.h"
-#endif
-
 #if defined(OS_MACOSX)
 #include "base/mac/scoped_nsautorelease_pool.h"
 #endif
@@ -94,22 +88,20 @@
 }
 
 void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanReadWithoutBlocking(
-    int fd, MessagePumpLibevent* pump) {
+    int fd,
+    MessagePumpLibevent*) {
   // Since OnFileCanWriteWithoutBlocking() gets called first, it can stop
   // watching the file descriptor.
   if (!watcher_)
     return;
-  pump->WillProcessIOEvent();
   watcher_->OnFileCanReadWithoutBlocking(fd);
-  pump->DidProcessIOEvent();
 }
 
 void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanWriteWithoutBlocking(
-    int fd, MessagePumpLibevent* pump) {
+    int fd,
+    MessagePumpLibevent*) {
   DCHECK(watcher_);
-  pump->WillProcessIOEvent();
   watcher_->OnFileCanWriteWithoutBlocking(fd);
-  pump->DidProcessIOEvent();
 }
 
 MessagePumpLibevent::MessagePumpLibevent()
@@ -160,7 +152,7 @@
     event_mask |= EV_WRITE;
   }
 
-  scoped_ptr<event> evt(controller->ReleaseEvent());
+  std::unique_ptr<event> evt(controller->ReleaseEvent());
   if (evt.get() == NULL) {
     // Ownership is transferred to the controller.
     evt.reset(new event);
@@ -205,17 +197,8 @@
   return true;
 }
 
-void MessagePumpLibevent::AddIOObserver(IOObserver *obs) {
-  io_observers_.AddObserver(obs);
-}
-
-void MessagePumpLibevent::RemoveIOObserver(IOObserver *obs) {
-  io_observers_.RemoveObserver(obs);
-}
-
 // Tell libevent to break out of inner loop.
-static void timer_callback(int /* fd */, short /* events */, void *context)
-{
+static void timer_callback(int /*fd*/, short /*events*/, void* context) {
   event_base_loopbreak((struct event_base *)context);
 }
 
@@ -226,7 +209,7 @@
 
   // event_base_loopexit() + EVLOOP_ONCE is leaky, see http://crbug.com/25641.
   // Instead, make our own timer and reuse it on each call to event_base_loop().
-  scoped_ptr<event> timer_event(new event);
+  std::unique_ptr<event> timer_event(new event);
 
   for (;;) {
 #if defined(OS_MACOSX)
@@ -307,14 +290,6 @@
   delayed_work_time_ = delayed_work_time;
 }
 
-void MessagePumpLibevent::WillProcessIOEvent() {
-  FOR_EACH_OBSERVER(IOObserver, io_observers_, WillProcessIOEvent());
-}
-
-void MessagePumpLibevent::DidProcessIOEvent() {
-  FOR_EACH_OBSERVER(IOObserver, io_observers_, DidProcessIOEvent());
-}
-
 bool MessagePumpLibevent::Init() {
   int fds[2];
   if (pipe(fds)) {
@@ -374,8 +349,7 @@
 
 // Called if a byte is received on the wakeup pipe.
 // static
-void MessagePumpLibevent::OnWakeup(int socket, short /* flags */,
-                                   void* context) {
+void MessagePumpLibevent::OnWakeup(int socket, short /*flags*/, void* context) {
   MessagePumpLibevent* that = static_cast<MessagePumpLibevent*>(context);
   DCHECK(that->wakeup_pipe_out_ == socket);
 
diff --git a/base/message_loop/message_pump_libevent.h b/base/message_loop/message_pump_libevent.h
index 4d2f4f7..76f882f 100644
--- a/base/message_loop/message_pump_libevent.h
+++ b/base/message_loop/message_pump_libevent.h
@@ -8,7 +8,6 @@
 #include "base/compiler_specific.h"
 #include "base/macros.h"
 #include "base/message_loop/message_pump.h"
-#include "base/observer_list.h"
 #include "base/threading/thread_checker.h"
 #include "base/time/time.h"
 
@@ -22,21 +21,6 @@
 // TODO(dkegel): add support for background file IO somehow
 class BASE_EXPORT MessagePumpLibevent : public MessagePump {
  public:
-  class IOObserver {
-   public:
-    IOObserver() {}
-
-    // An IOObserver is an object that receives IO notifications from the
-    // MessagePump.
-    //
-    // NOTE: An IOObserver implementation should be extremely fast!
-    virtual void WillProcessIOEvent() = 0;
-    virtual void DidProcessIOEvent() = 0;
-
-   protected:
-    virtual ~IOObserver() {}
-  };
-
   // Used with WatchFileDescriptor to asynchronously monitor the I/O readiness
   // of a file descriptor.
   class Watcher {
@@ -119,9 +103,6 @@
                            FileDescriptorWatcher *controller,
                            Watcher *delegate);
 
-  void AddIOObserver(IOObserver* obs);
-  void RemoveIOObserver(IOObserver* obs);
-
   // MessagePump methods:
   void Run(Delegate* delegate) override;
   void Quit() override;
@@ -168,7 +149,6 @@
   // ... libevent wrapper for read end
   event* wakeup_event_;
 
-  ObserverList<IOObserver> io_observers_;
   ThreadChecker watch_file_descriptor_caller_checker_;
   DISALLOW_COPY_AND_ASSIGN(MessagePumpLibevent);
 };
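
With the IOObserver hooks gone from MessagePumpLibevent (and the matching
typedefs removed from MessageLoopForIO), per-descriptor notifications remain
the job of the Watcher interface. A minimal reader sketch, assuming the usual
WatchFileDescriptor() signature on MessageLoopForIO; the fd handling is
illustrative only:

    #include <unistd.h>

    #include "base/message_loop/message_loop.h"

    class PipeReader : public base::MessageLoopForIO::Watcher {
     public:
      explicit PipeReader(int fd) : fd_(fd) {
        // Keep watching until |controller_| goes away.
        base::MessageLoopForIO::current()->WatchFileDescriptor(
            fd_, true /* persistent */, base::MessageLoopForIO::WATCH_READ,
            &controller_, this);
      }

      // base::MessageLoopForIO::Watcher:
      void OnFileCanReadWithoutBlocking(int fd) override {
        char buf[64];
        ssize_t n = read(fd, buf, sizeof(buf));  // Drain what is ready.
        (void)n;
      }
      void OnFileCanWriteWithoutBlocking(int fd) override {}

     private:
      int fd_;
      base::MessageLoopForIO::FileDescriptorWatcher controller_;
    };
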
diff --git a/base/message_loop/message_pump_win.h b/base/message_loop/message_pump_win.h
deleted file mode 100644
index e69de29..0000000
--- a/base/message_loop/message_pump_win.h
+++ /dev/null
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index 78862fa..3b398cd 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -7,8 +7,10 @@
 #include <algorithm>
 
 #include "base/build_time.h"
+#include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/strings/utf_string_conversions.h"
@@ -115,6 +117,28 @@
   }
 }
 
+// A second copy of FieldTrialList::seen_states_ that is meant to outlive the
+// FieldTrialList object to determine if the inconsistency happens because there
+// might be multiple FieldTrialList objects.
+// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
+base::LazyInstance<std::map<std::string, std::string>>::Leaky g_seen_states =
+    LAZY_INSTANCE_INITIALIZER;
+
+// A debug token generated during FieldTrialList construction. Used to diagnose
+// crbug.com/359406.
+// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
+int32_t g_debug_token = -1;
+
+// Whether to append the debug token to the child process --force-fieldtrials
+// command line. Used to diagnose crbug.com/359406.
+// TODO(asvitkine): Remove when crbug.com/359406 is resolved.
+bool g_append_debug_token_to_trial_string = false;
+
+// Tracks whether |g_seen_states| is used. Defaults to false, because unit tests
+// will create multiple FieldTrialList instances. Also controls whether
+// |g_debug_token| is included in the field trial state string.
+bool g_use_global_check_states = false;
+
 }  // namespace
 
 // statics
@@ -218,7 +242,9 @@
 
 // static
 void FieldTrial::EnableBenchmarking() {
-  DCHECK_EQ(0u, FieldTrialList::GetFieldTrialCount());
+  // TODO(asvitkine): Change this back to 0u after the trial in FieldTrialList
+  // constructor is removed.
+  DCHECK_EQ(1u, FieldTrialList::GetFieldTrialCount());
   enable_benchmarking_ = true;
 }
 
@@ -250,6 +276,9 @@
   DCHECK_GT(total_probability, 0);
   DCHECK(!trial_name_.empty());
   DCHECK(!default_group_name_.empty());
+
+  if (g_debug_token == -1)
+    g_debug_token = RandInt(1, INT32_MAX);
 }
 
 FieldTrial::~FieldTrial() {}
@@ -324,6 +353,30 @@
   Time::Exploded exploded;
   two_years_from_build_time.LocalExplode(&exploded);
   kNoExpirationYear = exploded.year;
+
+  // Run a 50/50 experiment that enables |g_use_global_check_states| only for
+  // half the users, to investigate if this instrumentation is causing the
+  // crashes to disappear for http://crbug.com/359406. Done here instead of a
+  // server-side trial because this needs to be done early during FieldTrialList
+  // initialization.
+  //
+  // Note: |g_use_global_check_states| is set via EnableGlobalStateChecks()
+  // prior to the FieldTrialList being created. We only want to do the trial
+  // check once the first time FieldTrialList is created, so use a static
+  // |first_time| variable to track this.
+  //
+  // TODO(asvitkine): Remove after http://crbug.com/359406 is fixed.
+  static bool first_time = true;
+  if (first_time && g_use_global_check_states) {
+    first_time = false;
+    base::FieldTrial* trial =
+        FactoryGetFieldTrial("UMA_CheckStates", 100, "NoChecks",
+                             kNoExpirationYear, 1, 1,
+                             FieldTrial::SESSION_RANDOMIZED, nullptr);
+    trial->AppendGroup("Checks", 50);
+    if (trial->group_name() == "NoChecks")
+      g_use_global_check_states = false;
+  }
 }
 
 FieldTrialList::~FieldTrialList() {
@@ -338,6 +391,18 @@
 }
 
 // static
+void FieldTrialList::EnableGlobalStateChecks() {
+  CHECK(!g_use_global_check_states);
+  g_use_global_check_states = true;
+  g_append_debug_token_to_trial_string = true;
+}
+
+// static
+int32_t FieldTrialList::GetDebugToken() {
+  return g_debug_token;
+}
+
+// static
 FieldTrial* FieldTrialList::FactoryGetFieldTrial(
     const std::string& trial_name,
     FieldTrial::Probability total_probability,
@@ -348,8 +413,8 @@
     FieldTrial::RandomizationType randomization_type,
     int* default_group_number) {
   return FactoryGetFieldTrialWithRandomizationSeed(
-      trial_name, total_probability, default_group_name,
-      year, month, day_of_month, randomization_type, 0, default_group_number);
+      trial_name, total_probability, default_group_name, year, month,
+      day_of_month, randomization_type, 0, default_group_number, NULL);
 }
 
 // static
@@ -362,7 +427,8 @@
     const int day_of_month,
     FieldTrial::RandomizationType randomization_type,
     uint32_t randomization_seed,
-    int* default_group_number) {
+    int* default_group_number,
+    const FieldTrial::EntropyProvider* override_entropy_provider) {
   if (default_group_number)
     *default_group_number = FieldTrial::kDefaultGroupNumber;
   // Check if the field trial has already been created in some other way.
@@ -396,8 +462,10 @@
 
   double entropy_value;
   if (randomization_type == FieldTrial::ONE_TIME_RANDOMIZED) {
+    // If an override entropy provider is given, use it.
     const FieldTrial::EntropyProvider* entropy_provider =
-        GetEntropyProviderForOneTimeRandomization();
+        override_entropy_provider ? override_entropy_provider
+                                  : GetEntropyProviderForOneTimeRandomization();
     CHECK(entropy_provider);
     entropy_value = entropy_provider->GetEntropyForTrial(trial_name,
                                                          randomization_seed);
@@ -466,6 +534,12 @@
     output->append(it->group_name);
     output->append(1, kPersistentStringSeparator);
   }
+  if (g_append_debug_token_to_trial_string) {
+    output->append("DebugToken");
+    output->append(1, kPersistentStringSeparator);
+    output->append(IntToString(g_debug_token));
+    output->append(1, kPersistentStringSeparator);
+  }
 }
 
 // static
@@ -489,8 +563,13 @@
     trial.group_name.AppendToString(output);
     output->append(1, kPersistentStringSeparator);
 
+    // TODO(asvitkine): Remove these when http://crbug.com/359406 is fixed.
     CheckTrialGroup(trial.trial_name.as_string(), trial.group_name.as_string(),
                     &global_->seen_states_);
+    if (g_use_global_check_states) {
+      CheckTrialGroup(trial.trial_name.as_string(),
+                      trial.group_name.as_string(), &g_seen_states.Get());
+    }
   }
 }
 
@@ -615,10 +694,15 @@
   if (!field_trial->enable_field_trial_)
     return;
 
+  // TODO(asvitkine): Remove this block when http://crbug.com/359406 is fixed.
   {
     AutoLock auto_lock(global_->lock_);
     CheckTrialGroup(field_trial->trial_name(),
                     field_trial->group_name_internal(), &global_->seen_states_);
+    if (g_use_global_check_states) {
+      CheckTrialGroup(field_trial->trial_name(),
+                      field_trial->group_name_internal(), &g_seen_states.Get());
+    }
   }
   global_->observer_list_->Notify(
       FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
index 95cf504..fc6237a 100644
--- a/base/metrics/field_trial.h
+++ b/base/metrics/field_trial.h
@@ -347,6 +347,20 @@
   // Destructor Release()'s references to all registered FieldTrial instances.
   ~FieldTrialList();
 
+  // TODO(asvitkine): Temporary function to diagnose http://crbug.com/359406.
+  // Remove when that bug is fixed. This enables using a global map that checks
+  // the state of field trials between possible FieldTrialList instances. If
+  // enabled, a CHECK will be hit if it's seen that a field trial is given a
+  // different state than what was specified on a renderer process launch
+  // command line.
+  static void EnableGlobalStateChecks();
+
+  // TODO(asvitkine): Temporary function to diagnose http://crbug.com/359406.
+  // Remove when that bug is fixed. This returns a unique token generated during
+  // FieldTrialList construction. This is used to verify that this value stays
+  // consistent between renderer process invocations.
+  static int32_t GetDebugToken();
+
   // Get a FieldTrial instance from the factory.
   //
   // |name| is used to register the instance with the FieldTrialList class,
@@ -379,9 +393,12 @@
   // used on one-time randomized field trials (instead of a hash of the trial
   // name, which is used otherwise or if |randomization_seed| has value 0). The
   // |randomization_seed| value (other than 0) should never be the same for two
-  // trials, else this would result in correlated group assignments.
-  // Note: Using a custom randomization seed is only supported by the
-  // PermutedEntropyProvider (which is used when UMA is not enabled).
+  // trials, else this would result in correlated group assignments.  Note:
+  // Using a custom randomization seed is only supported by the
+  // PermutedEntropyProvider (which is used when UMA is not enabled). If
+  // |override_entropy_provider| is not null, then it will be used for
+  // randomization instead of the provider given when the FieldTrialList was
+  // instantiated.
   static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
       const std::string& trial_name,
       FieldTrial::Probability total_probability,
@@ -391,7 +408,8 @@
       const int day_of_month,
       FieldTrial::RandomizationType randomization_type,
       uint32_t randomization_seed,
-      int* default_group_number);
+      int* default_group_number,
+      const FieldTrial::EntropyProvider* override_entropy_provider);
 
   // The Find() method can be used to test to see if a named trial was already
   // registered, or to retrieve a pointer to it from the global map.
@@ -514,7 +532,7 @@
 
   // Entropy provider to be used for one-time randomized field trials. If NULL,
   // one-time randomization is not supported.
-  scoped_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
+  std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
 
   // List of observers to be notified when a group is selected for a FieldTrial.
   scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
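
FactoryGetFieldTrialWithRandomizationSeed() gains a trailing
|override_entropy_provider| argument, and existing callers (including
FactoryGetFieldTrial() above) simply pass NULL to keep using the provider
registered with the FieldTrialList. A hedged sketch of a one-time-randomized
trial created through the extended signature (names and dates are made up):

    #include "base/metrics/field_trial.h"

    void RegisterExampleTrial() {
      base::FieldTrial* trial =
          base::FieldTrialList::FactoryGetFieldTrialWithRandomizationSeed(
              "ExampleTrial", 100 /* total_probability */, "Default",
              2017, 12, 31, base::FieldTrial::ONE_TIME_RANDOMIZED,
              0 /* randomization_seed */, NULL /* default_group_number */,
              NULL /* override_entropy_provider */);
      // 50 out of 100: roughly half of clients land in "Enabled".
      trial->AppendGroup("Enabled", 50);
    }
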
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index 713ca06..0e363e3 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -18,6 +18,7 @@
 #include "base/compiler_specific.h"
 #include "base/debug/alias.h"
 #include "base/logging.h"
+#include "base/memory/ptr_util.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/metrics_hashes.h"
 #include "base/metrics/persistent_histogram_allocator.h"
@@ -94,7 +95,6 @@
           uint32_t bucket_count,
           int32_t flags)
     : Factory(name, HISTOGRAM, minimum, maximum, bucket_count, flags) {}
-
   virtual ~Factory() = default;
 
   // Create histogram based on construction parameters. Caller takes
@@ -124,14 +124,14 @@
 
   // Allocate the correct Histogram object off the heap (in case persistent
   // memory is not available).
-  virtual scoped_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
-    return make_scoped_ptr(new Histogram(name_, minimum_, maximum_, ranges));
+  virtual std::unique_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
+    return WrapUnique(new Histogram(name_, minimum_, maximum_, ranges));
   }
 
   // Perform any required datafill on the just-created histogram.  If
   // overridden, be sure to call the "super" version -- this method may not
   // always remain empty.
-  virtual void FillHistogram(HistogramBase* /* histogram */) {}
+  virtual void FillHistogram(HistogramBase* /*histogram*/) {}
 
   // These values are protected (instead of private) because they need to
   // be accessible to methods of sub-classes in order to avoid passing
@@ -148,12 +148,6 @@
 };
 
 HistogramBase* Histogram::Factory::Build() {
-  // Import histograms from known persistent storage. Histograms could have
-  // been added by other processes and they must be fetched and recognized
-  // locally in order to be found by FindHistograms() below. If the persistent
-  // memory segment is not shared between processes, this call does nothing.
-  PersistentHistogramAllocator::ImportGlobalHistograms();
-
   HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
   if (!histogram) {
     // To avoid racy destruction at shutdown, the following will be leaked.
@@ -178,9 +172,8 @@
     // allocating from it fails, code below will allocate the histogram from
     // the process heap.
     PersistentHistogramAllocator::Reference histogram_ref = 0;
-    scoped_ptr<HistogramBase> tentative_histogram;
-    PersistentHistogramAllocator* allocator =
-        PersistentHistogramAllocator::GetGlobalAllocator();
+    std::unique_ptr<HistogramBase> tentative_histogram;
+    PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
     if (allocator) {
       tentative_histogram = allocator->AllocateHistogram(
           histogram_type_,
@@ -234,7 +227,7 @@
     // return NULL here with the expectation that bad code in Chrome will crash
     // on dereference, but extension/Pepper APIs will guard against NULL and not
     // crash.
-    LOG(ERROR) << "Histogram " << name_ << " has bad construction arguments";
+    DLOG(ERROR) << "Histogram " << name_ << " has bad construction arguments";
     return nullptr;
   }
   return histogram;
@@ -279,7 +272,7 @@
                         flags);
 }
 
-scoped_ptr<HistogramBase> Histogram::PersistentCreate(
+std::unique_ptr<HistogramBase> Histogram::PersistentCreate(
     const std::string& name,
     Sample minimum,
     Sample maximum,
@@ -289,9 +282,9 @@
     uint32_t counts_size,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta) {
-  return make_scoped_ptr(new Histogram(
-      name, minimum, maximum, ranges, counts, logged_counts, counts_size,
-      meta, logged_meta));
+  return WrapUnique(new Histogram(name, minimum, maximum, ranges, counts,
+                                  logged_counts, counts_size, meta,
+                                  logged_meta));
 }
 
 // Calculate what range of values are held in each bucket.
@@ -442,12 +435,14 @@
   FindAndRunCallback(value);
 }
 
-scoped_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
+std::unique_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
   return SnapshotSampleVector();
 }
 
-scoped_ptr<HistogramSamples> Histogram::SnapshotDelta() {
-  scoped_ptr<HistogramSamples> snapshot = SnapshotSampleVector();
+std::unique_ptr<HistogramSamples> Histogram::SnapshotDelta() {
+  DCHECK(!final_delta_created_);
+
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSampleVector();
   if (!logged_samples_) {
     // If nothing has been previously logged, save this one as
     // |logged_samples_| and gather another snapshot to return.
@@ -461,6 +456,18 @@
   return snapshot;
 }
 
+std::unique_ptr<HistogramSamples> Histogram::SnapshotFinalDelta() const {
+  DCHECK(!final_delta_created_);
+  final_delta_created_ = true;
+
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSampleVector();
+
+  // Subtract what was previously logged and then return.
+  if (logged_samples_)
+    snapshot->Subtract(*logged_samples_);
+  return snapshot;
+}
+
 void Histogram::AddSamples(const HistogramSamples& samples) {
   samples_->Add(samples);
 }
@@ -527,7 +534,7 @@
 Histogram::~Histogram() {
 }
 
-bool Histogram::PrintEmptyBucket(uint32_t /* index */) const {
+bool Histogram::PrintEmptyBucket(uint32_t /*index*/) const {
   return true;
 }
 
@@ -577,8 +584,8 @@
   return histogram;
 }
 
-scoped_ptr<SampleVector> Histogram::SnapshotSampleVector() const {
-  scoped_ptr<SampleVector> samples(
+std::unique_ptr<SampleVector> Histogram::SnapshotSampleVector() const {
+  std::unique_ptr<SampleVector> samples(
       new SampleVector(samples_->id(), bucket_ranges()));
   samples->Add(*samples_);
   return samples;
@@ -589,7 +596,7 @@
                                std::string* output) const {
   // Get local (stack) copies of all effectively volatile class data so that we
   // are consistent across our output activities.
-  scoped_ptr<SampleVector> snapshot = SnapshotSampleVector();
+  std::unique_ptr<SampleVector> snapshot = SnapshotSampleVector();
   Count sample_count = snapshot->TotalCount();
 
   WriteAsciiHeader(*snapshot, sample_count, output);
@@ -702,14 +709,14 @@
 void Histogram::GetCountAndBucketData(Count* count,
                                       int64_t* sum,
                                       ListValue* buckets) const {
-  scoped_ptr<SampleVector> snapshot = SnapshotSampleVector();
+  std::unique_ptr<SampleVector> snapshot = SnapshotSampleVector();
   *count = snapshot->TotalCount();
   *sum = snapshot->sum();
   uint32_t index = 0;
   for (uint32_t i = 0; i < bucket_count(); ++i) {
     Sample count_at_index = snapshot->GetCountAtIndex(i);
     if (count_at_index > 0) {
-      scoped_ptr<DictionaryValue> bucket_value(new DictionaryValue());
+      std::unique_ptr<DictionaryValue> bucket_value(new DictionaryValue());
       bucket_value->SetInteger("low", ranges(i));
       if (i != bucket_count() - 1)
         bucket_value->SetInteger("high", ranges(i + 1));
@@ -737,6 +744,7 @@
                          bucket_count, flags) {
     descriptions_ = descriptions;
   }
+  ~Factory() override = default;
 
  protected:
   BucketRanges* CreateRanges() override {
@@ -745,8 +753,9 @@
     return ranges;
   }
 
-  scoped_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) override {
-    return make_scoped_ptr(
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(
         new LinearHistogram(name_, minimum_, maximum_, ranges));
   }
 
@@ -806,7 +815,7 @@
                         flags);
 }
 
-scoped_ptr<HistogramBase> LinearHistogram::PersistentCreate(
+std::unique_ptr<HistogramBase> LinearHistogram::PersistentCreate(
     const std::string& name,
     Sample minimum,
     Sample maximum,
@@ -816,9 +825,9 @@
     uint32_t counts_size,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta) {
-  return make_scoped_ptr(new LinearHistogram(
-      name, minimum, maximum, ranges, counts, logged_counts, counts_size,
-      meta, logged_meta));
+  return WrapUnique(new LinearHistogram(name, minimum, maximum, ranges,
+                                              counts, logged_counts,
+                                              counts_size, meta, logged_meta));
 }
 
 HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
@@ -928,6 +937,7 @@
  public:
   Factory(const std::string& name, int32_t flags)
     : Histogram::Factory(name, BOOLEAN_HISTOGRAM, 1, 2, 3, flags) {}
+  ~Factory() override = default;
 
  protected:
   BucketRanges* CreateRanges() override {
@@ -936,8 +946,9 @@
     return ranges;
   }
 
-  scoped_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) override {
-    return make_scoped_ptr(new BooleanHistogram(name_, ranges));
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(new BooleanHistogram(name_, ranges));
   }
 
  private:
@@ -953,14 +964,14 @@
   return FactoryGet(std::string(name), flags);
 }
 
-scoped_ptr<HistogramBase> BooleanHistogram::PersistentCreate(
+std::unique_ptr<HistogramBase> BooleanHistogram::PersistentCreate(
     const std::string& name,
     const BucketRanges* ranges,
     HistogramBase::AtomicCount* counts,
     HistogramBase::AtomicCount* logged_counts,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta) {
-  return make_scoped_ptr(new BooleanHistogram(
+  return WrapUnique(new BooleanHistogram(
       name, ranges, counts, logged_counts, meta, logged_meta));
 }
 
@@ -1015,6 +1026,7 @@
     : Histogram::Factory(name, CUSTOM_HISTOGRAM, 0, 0, 0, flags) {
     custom_ranges_ = custom_ranges;
   }
+  ~Factory() override = default;
 
  protected:
   BucketRanges* CreateRanges() override {
@@ -1033,8 +1045,9 @@
     return bucket_ranges;
   }
 
-  scoped_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) override {
-    return make_scoped_ptr(new CustomHistogram(name_, ranges));
+  std::unique_ptr<HistogramBase> HeapAlloc(
+      const BucketRanges* ranges) override {
+    return WrapUnique(new CustomHistogram(name_, ranges));
   }
 
  private:
@@ -1059,7 +1072,7 @@
   return FactoryGet(std::string(name), custom_ranges, flags);
 }
 
-scoped_ptr<HistogramBase> CustomHistogram::PersistentCreate(
+std::unique_ptr<HistogramBase> CustomHistogram::PersistentCreate(
     const std::string& name,
     const BucketRanges* ranges,
     HistogramBase::AtomicCount* counts,
@@ -1067,7 +1080,7 @@
     uint32_t counts_size,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta) {
-  return make_scoped_ptr(new CustomHistogram(
+  return WrapUnique(new CustomHistogram(
       name, ranges, counts, logged_counts, counts_size, meta, logged_meta));
 }
 
@@ -1127,8 +1140,7 @@
   return true;
 }
 
-double CustomHistogram::GetBucketSize(Count /* current */,
-                                      uint32_t /* i */) const {
+double CustomHistogram::GetBucketSize(Count /*current*/, uint32_t /*i*/) const {
   return 1;
 }
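
The Snapshot* family on histograms now hands back std::unique_ptr ownership,
and once the new SnapshotFinalDelta() has run, the DCHECKs above forbid taking
any further deltas. A short usage sketch under those assumptions (the metric
name is made up):

    #include <memory>

    #include "base/metrics/histogram.h"
    #include "base/metrics/histogram_samples.h"

    void ReportExampleMetric() {
      base::HistogramBase* histogram = base::Histogram::FactoryGet(
          "Example.Latency", 1 /* min */, 10000 /* max */, 50 /* buckets */,
          base::HistogramBase::kUmaTargetedHistogramFlag);
      histogram->Add(42);

      // Samples accumulated since the previous delta; the caller now owns the
      // snapshot through a std::unique_ptr instead of a scoped_ptr.
      std::unique_ptr<base::HistogramSamples> delta =
          histogram->SnapshotDelta();
      // delta->TotalCount() reflects the Add(42) above, assuming no other
      // reporter consumed it first.
    }
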
 
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
index 5111b8f..2283a4d 100644
--- a/base/metrics/histogram.h
+++ b/base/metrics/histogram.h
@@ -70,6 +70,7 @@
 #include <stdint.h>
 
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -78,7 +79,6 @@
 #include "base/gtest_prod_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_base.h"
 // TODO(asvitkine): Migrate callers to include this directly and remove this.
@@ -142,7 +142,7 @@
                                        int32_t flags);
 
   // Create a histogram using data in persistent storage.
-  static scoped_ptr<HistogramBase> PersistentCreate(
+  static std::unique_ptr<HistogramBase> PersistentCreate(
       const std::string& name,
       Sample minimum,
       Sample maximum,
@@ -202,8 +202,9 @@
                                 uint32_t expected_bucket_count) const override;
   void Add(Sample value) override;
   void AddCount(Sample value, int count) override;
-  scoped_ptr<HistogramSamples> SnapshotSamples() const override;
-  scoped_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
   void AddSamples(const HistogramSamples& samples) override;
   bool AddSamplesFromPickle(base::PickleIterator* iter) override;
   void WriteHTMLGraph(std::string* output) const override;
@@ -268,7 +269,7 @@
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
 
   // Implementation of SnapshotSamples function.
-  scoped_ptr<SampleVector> SnapshotSampleVector() const;
+  std::unique_ptr<SampleVector> SnapshotSampleVector() const;
 
   //----------------------------------------------------------------------------
   // Helpers for emitting Ascii graphic.  Each method appends data to output.
@@ -308,10 +309,14 @@
 
   // Finally, provide the state that changes with the addition of each new
   // sample.
-  scoped_ptr<SampleVector> samples_;
+  std::unique_ptr<SampleVector> samples_;
 
   // Also keep a previous uploaded state for calculating deltas.
-  scoped_ptr<HistogramSamples> logged_samples_;
+  std::unique_ptr<HistogramSamples> logged_samples_;
+
+  // Flag to indicate if SnapshotFinalDelta has been previously called. It is
+  // used to DCHECK that a final delta is not created multiple times.
+  mutable bool final_delta_created_ = false;
 
   DISALLOW_COPY_AND_ASSIGN(Histogram);
 };
@@ -352,7 +357,7 @@
                                        int32_t flags);
 
   // Create a histogram using data in persistent storage.
-  static scoped_ptr<HistogramBase> PersistentCreate(
+  static std::unique_ptr<HistogramBase> PersistentCreate(
       const std::string& name,
       Sample minimum,
       Sample maximum,
@@ -443,7 +448,7 @@
   static HistogramBase* FactoryGet(const char* name, int32_t flags);
 
   // Create a histogram using data in persistent storage.
-  static scoped_ptr<HistogramBase> PersistentCreate(
+  static std::unique_ptr<HistogramBase> PersistentCreate(
       const std::string& name,
       const BucketRanges* ranges,
       HistogramBase::AtomicCount* counts,
@@ -493,7 +498,7 @@
                                    int32_t flags);
 
   // Create a histogram using data in persistent storage.
-  static scoped_ptr<HistogramBase> PersistentCreate(
+  static std::unique_ptr<HistogramBase> PersistentCreate(
       const std::string& name,
       const BucketRanges* ranges,
       HistogramBase::AtomicCount* counts,
diff --git a/base/metrics/histogram_base.cc b/base/metrics/histogram_base.cc
index 0152bf7..8c4f1ec 100644
--- a/base/metrics/histogram_base.cc
+++ b/base/metrics/histogram_base.cc
@@ -6,11 +6,11 @@
 
 #include <limits.h>
 
+#include <memory>
 #include <utility>
 
 #include "base/json/json_string_value_serializer.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/sparse_histogram.h"
@@ -98,7 +98,7 @@
 }
 
 uint32_t HistogramBase::FindCorruption(
-    const HistogramSamples& /* samples */) const {
+    const HistogramSamples& /*samples*/) const {
   // Not supported by default.
   return NO_INCONSISTENCIES;
 }
@@ -106,9 +106,9 @@
 void HistogramBase::WriteJSON(std::string* output) const {
   Count count;
   int64_t sum;
-  scoped_ptr<ListValue> buckets(new ListValue());
+  std::unique_ptr<ListValue> buckets(new ListValue());
   GetCountAndBucketData(&count, &sum, buckets.get());
-  scoped_ptr<DictionaryValue> parameters(new DictionaryValue());
+  std::unique_ptr<DictionaryValue> parameters(new DictionaryValue());
   GetParameters(parameters.get());
 
   JSONStringValueSerializer serializer(output);
@@ -129,8 +129,8 @@
   DCHECK(!report_histogram_);
   size_t existing = StatisticsRecorder::GetHistogramCount();
   if (existing != 0) {
-    DLOG(WARNING) << existing
-                  << " histograms were created before reporting was enabled.";
+    DVLOG(1) << existing
+             << " histograms were created before reporting was enabled.";
   }
 
   std::string name =
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
index f11befd..58a9ab2 100644
--- a/base/metrics/histogram_base.h
+++ b/base/metrics/histogram_base.h
@@ -9,13 +9,13 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 #include "base/time/time.h"
 
@@ -195,12 +195,21 @@
 
   // Snapshot the current complete set of sample data.
   // Override with atomic/locked snapshot if needed.
-  virtual scoped_ptr<HistogramSamples> SnapshotSamples() const = 0;
+  virtual std::unique_ptr<HistogramSamples> SnapshotSamples() const = 0;
 
   // Calculate the change (delta) in histogram counts since the previous call
   // to this method. Each successive call will return only those counts
   // changed since the last call.
-  virtual scoped_ptr<HistogramSamples> SnapshotDelta() = 0;
+  virtual std::unique_ptr<HistogramSamples> SnapshotDelta() = 0;
+
+  // Calculate the change (delta) in histogram counts since the previous call
+  // to SnapshotDelta() but do so without modifying any internal data as to
+  // what was previous logged. After such a call, no further calls to this
+  // method or to SnapshotDelta() should be done as the result would include
+  // data previously returned. Because no internal data is changed, this call
+  // can be made on "const" histograms such as those with data held in
+  // read-only memory.
+  virtual std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const = 0;
 
   // The following methods provide graphical histogram displays.
   virtual void WriteHTMLGraph(std::string* output) const = 0;
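To make the contract above concrete, a minimal sketch of the intended call order follows; the histogram name is illustrative only. SnapshotDelta() may be called repeatedly and updates the logged state, while SnapshotFinalDelta() must be the last snapshot ever taken from that histogram.

    HistogramBase* histogram = Histogram::FactoryGet(
        "Example.FinalDelta", 1, 100, 10, HistogramBase::kNoFlags);
    histogram->Add(5);
    // Updates the internal logged state; may be repeated.
    std::unique_ptr<HistogramSamples> delta = histogram->SnapshotDelta();
    histogram->Add(7);
    // Does not touch internal state, so it also works on read-only
    // histograms; no further snapshots should follow.
    std::unique_ptr<HistogramSamples> final_delta =
        histogram->SnapshotFinalDelta();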
diff --git a/base/metrics/histogram_base_unittest.cc b/base/metrics/histogram_base_unittest.cc
index 6b41597..5ce39ca 100644
--- a/base/metrics/histogram_base_unittest.cc
+++ b/base/metrics/histogram_base_unittest.cc
@@ -38,7 +38,7 @@
   }
 
  private:
-  scoped_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
 
   DISALLOW_COPY_AND_ASSIGN(HistogramBaseTest);
 };
@@ -181,7 +181,7 @@
   CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
   SparseHistogram::FactoryGet("CRH-Sparse", 0);
 
-  scoped_ptr<HistogramSamples> samples = report->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples = report->SnapshotSamples();
   EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
   EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
   EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
diff --git a/base/metrics/histogram_delta_serialization.h b/base/metrics/histogram_delta_serialization.h
index a05a1a7..3bb04cb 100644
--- a/base/metrics/histogram_delta_serialization.h
+++ b/base/metrics/histogram_delta_serialization.h
@@ -5,12 +5,12 @@
 #ifndef BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
 #define BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/base_export.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_flattener.h"
 #include "base/metrics/histogram_snapshot_manager.h"
 #include "base/threading/thread_checker.h"
diff --git a/base/metrics/histogram_delta_serialization_unittest.cc b/base/metrics/histogram_delta_serialization_unittest.cc
index 93f7198..80a7009 100644
--- a/base/metrics/histogram_delta_serialization_unittest.cc
+++ b/base/metrics/histogram_delta_serialization_unittest.cc
@@ -34,7 +34,7 @@
   HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
 
   // The histogram has kIPCSerializationSourceFlag. So samples will be ignored.
-  scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
   EXPECT_EQ(1, snapshot->GetCount(1));
   EXPECT_EQ(1, snapshot->GetCount(10));
   EXPECT_EQ(1, snapshot->GetCount(100));
@@ -44,7 +44,7 @@
   histogram->ClearFlags(HistogramBase::kIPCSerializationSourceFlag);
   HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
 
-  scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
   EXPECT_EQ(2, snapshot2->GetCount(1));
   EXPECT_EQ(2, snapshot2->GetCount(10));
   EXPECT_EQ(2, snapshot2->GetCount(100));
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index fa23bea..ce1811a 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -111,7 +111,13 @@
 
 //------------------------------------------------------------------------------
 // Provide easy general purpose histogram in a macro, just like stats counters.
-// The first four macros use 50 buckets.
+// Most of these macros use 50 buckets, but check the definition for details.
+//
+// All of these macros must be called with |name| as a runtime constant --- it
+// doesn't have to literally be a constant, but it must be the same string on
+// all calls from a particular call site. If this rule is violated,
+// STATIC_HISTOGRAM_POINTER_BLOCK will DCHECK, and if DCHECKS are disabled, the
+// data will be written to the wrong histogram.
 
 #define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
     name, sample, base::TimeDelta::FromMilliseconds(1), \
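To illustrate the runtime-constant rule above (the histogram names here are hypothetical): the macros cache the histogram pointer in a function-local static, so the name passed at a given call site must never vary.

    // OK: the same string on every execution of this call site.
    LOCAL_HISTOGRAM_COUNTS("Hypothetical.FixedName", 42);

    // Not OK: a name that varies per call. The pointer cached on the first
    // execution keeps being used, so with DCHECKs disabled the samples land
    // in whichever histogram was created first.
    // LOCAL_HISTOGRAM_COUNTS(base::StringPrintf("Hypothetical.Name%d", i), 42);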
diff --git a/base/metrics/histogram_samples.cc b/base/metrics/histogram_samples.cc
index e3a4013..ea3b987 100644
--- a/base/metrics/histogram_samples.cc
+++ b/base/metrics/histogram_samples.cc
@@ -82,23 +82,10 @@
 
 HistogramSamples::~HistogramSamples() {}
 
-// Despite using atomic operations, the increment/add actions below are *not*
-// atomic! Race conditions may cause loss of samples or even completely corrupt
-// the 64-bit sum on 32-bit machines. This is done intentionally to reduce the
-// cost of these operations that could be executed in performance-significant
-//  points of the code.
-//
-// TODO(bcwhite): Gather quantitative information as to the cost of using
-// proper atomic increments and improve either globally or for those histograms
-// that really need it.
-
 void HistogramSamples::Add(const HistogramSamples& other) {
-  meta_->sum += other.sum();
-
-  HistogramBase::Count old_redundant_count =
-      subtle::NoBarrier_Load(&meta_->redundant_count);
-  subtle::NoBarrier_Store(&meta_->redundant_count,
-      old_redundant_count + other.redundant_count());
+  IncreaseSum(other.sum());
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    other.redundant_count());
   bool success = AddSubtractImpl(other.Iterator().get(), ADD);
   DCHECK(success);
 }
@@ -110,39 +97,32 @@
   if (!iter->ReadInt64(&sum) || !iter->ReadInt(&redundant_count))
     return false;
 
-  meta_->sum += sum;
-
-  HistogramBase::Count old_redundant_count =
-      subtle::NoBarrier_Load(&meta_->redundant_count);
-  subtle::NoBarrier_Store(&meta_->redundant_count,
-                          old_redundant_count + redundant_count);
+  IncreaseSum(sum);
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    redundant_count);
 
   SampleCountPickleIterator pickle_iter(iter);
   return AddSubtractImpl(&pickle_iter, ADD);
 }
 
 void HistogramSamples::Subtract(const HistogramSamples& other) {
-  meta_->sum -= other.sum();
-
-  HistogramBase::Count old_redundant_count =
-      subtle::NoBarrier_Load(&meta_->redundant_count);
-  subtle::NoBarrier_Store(&meta_->redundant_count,
-                          old_redundant_count - other.redundant_count());
+  IncreaseSum(-other.sum());
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
+                                    -other.redundant_count());
   bool success = AddSubtractImpl(other.Iterator().get(), SUBTRACT);
   DCHECK(success);
 }
 
 bool HistogramSamples::Serialize(Pickle* pickle) const {
-  if (!pickle->WriteInt64(meta_->sum))
+  if (!pickle->WriteInt64(sum()))
     return false;
-  if (!pickle->WriteInt(subtle::NoBarrier_Load(&meta_->redundant_count)))
+  if (!pickle->WriteInt(redundant_count()))
     return false;
 
   HistogramBase::Sample min;
   HistogramBase::Sample max;
   HistogramBase::Count count;
-  for (scoped_ptr<SampleCountIterator> it = Iterator();
-       !it->Done();
+  for (std::unique_ptr<SampleCountIterator> it = Iterator(); !it->Done();
        it->Next()) {
     it->Get(&min, &max, &count);
     if (!pickle->WriteInt(min) ||
@@ -154,17 +134,20 @@
 }
 
 void HistogramSamples::IncreaseSum(int64_t diff) {
+#ifdef ARCH_CPU_64_BITS
+  subtle::NoBarrier_AtomicIncrement(&meta_->sum, diff);
+#else
   meta_->sum += diff;
+#endif
 }
 
 void HistogramSamples::IncreaseRedundantCount(HistogramBase::Count diff) {
-  subtle::NoBarrier_Store(&meta_->redundant_count,
-      subtle::NoBarrier_Load(&meta_->redundant_count) + diff);
+  subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, diff);
 }
 
 SampleCountIterator::~SampleCountIterator() {}
 
-bool SampleCountIterator::GetBucketIndex(size_t* /* index */) const {
+bool SampleCountIterator::GetBucketIndex(size_t* /*index*/) const {
   DCHECK(!Done());
   return false;
 }
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
index 30bff84..e28573f 100644
--- a/base/metrics/histogram_samples.h
+++ b/base/metrics/histogram_samples.h
@@ -8,9 +8,10 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
+
 #include "base/atomicops.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 
 namespace base {
@@ -37,8 +38,13 @@
     // accuracy of this value; there may be races during histogram
     // accumulation and snapshotting that we choose to accept. It should
     // be treated as approximate.
-    // TODO(bcwhite): Change this to std::atomic<int64_t>.
+#ifdef ARCH_CPU_64_BITS
+    subtle::Atomic64 sum;
+#else
+    // 32-bit systems don't have atomic 64-bit operations. Use a basic type
+    // and don't worry about "shearing".
     int64_t sum;
+#endif
 
     // A "redundant" count helps identify memory corruption. It redundantly
     // stores the total number of samples accumulated in the histogram. We
@@ -68,12 +74,18 @@
 
   virtual void Subtract(const HistogramSamples& other);
 
-  virtual scoped_ptr<SampleCountIterator> Iterator() const = 0;
+  virtual std::unique_ptr<SampleCountIterator> Iterator() const = 0;
   virtual bool Serialize(Pickle* pickle) const;
 
   // Accessor functions.
   uint64_t id() const { return meta_->id; }
-  int64_t sum() const { return meta_->sum; }
+  int64_t sum() const {
+#ifdef ARCH_CPU_64_BITS
+    return subtle::NoBarrier_Load(&meta_->sum);
+#else
+    return meta_->sum;
+#endif
+  }
   HistogramBase::Count redundant_count() const {
     return subtle::NoBarrier_Load(&meta_->redundant_count);
   }
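The ARCH_CPU_64_BITS split above is the core of the change: 64-bit builds get lock-free atomic updates of the sum, while 32-bit builds keep a plain int64_t and accept possible shearing. A self-contained sketch of the same pattern, using hypothetical names:

    #include <stdint.h>

    #include "base/atomicops.h"
    #include "build/build_config.h"

    struct SumMetadata {  // hypothetical stand-in for Metadata
    #ifdef ARCH_CPU_64_BITS
      base::subtle::Atomic64 sum;
    #else
      int64_t sum;  // plain 64-bit value; shearing accepted on 32-bit targets
    #endif
    };

    void IncreaseSum(SumMetadata* meta, int64_t diff) {
    #ifdef ARCH_CPU_64_BITS
      base::subtle::NoBarrier_AtomicIncrement(&meta->sum, diff);
    #else
      meta->sum += diff;
    #endif
    }

    int64_t ReadSum(const SumMetadata* meta) {
    #ifdef ARCH_CPU_64_BITS
      return base::subtle::NoBarrier_Load(&meta->sum);
    #else
      return meta->sum;
    #endif
    }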
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
index 32dd4e6..dc6cb8a 100644
--- a/base/metrics/histogram_snapshot_manager.cc
+++ b/base/metrics/histogram_snapshot_manager.cc
@@ -4,7 +4,9 @@
 
 #include "base/metrics/histogram_snapshot_manager.h"
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
+#include "base/debug/alias.h"
 #include "base/metrics/histogram_flattener.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/statistics_recorder.h"
@@ -29,11 +31,12 @@
 
   DCHECK(owned_histograms_.empty());
 
-#ifdef DEBUG
-    CHECK(!iter->second.histogram);
-    CHECK(!iter->second.accumulated_samples);
-    CHECK(!(iter->second.inconsistencies &
-            HistogramBase::NEW_INCONSISTENCY_FOUND));
+#if DCHECK_IS_ON()
+  for (const auto& hash_and_info : known_histograms_) {
+    DCHECK(!hash_and_info.second.histogram);
+    DCHECK(!hash_and_info.second.accumulated_samples);
+    DCHECK(!(hash_and_info.second.inconsistencies &
+             HistogramBase::NEW_INCONSISTENCY_FOUND));
   }
 #endif
 }
@@ -43,7 +46,7 @@
 }
 
 void HistogramSnapshotManager::PrepareDeltaTakingOwnership(
-    scoped_ptr<HistogramBase> histogram) {
+    std::unique_ptr<HistogramBase> histogram) {
   PrepareSamples(histogram.get(), histogram->SnapshotDelta());
   owned_histograms_.push_back(std::move(histogram));
 }
@@ -53,11 +56,17 @@
 }
 
 void HistogramSnapshotManager::PrepareAbsoluteTakingOwnership(
-    scoped_ptr<const HistogramBase> histogram) {
+    std::unique_ptr<const HistogramBase> histogram) {
   PrepareSamples(histogram.get(), histogram->SnapshotSamples());
   owned_histograms_.push_back(std::move(histogram));
 }
 
+void HistogramSnapshotManager::PrepareFinalDeltaTakingOwnership(
+    std::unique_ptr<const HistogramBase> histogram) {
+  PrepareSamples(histogram.get(), histogram->SnapshotFinalDelta());
+  owned_histograms_.push_back(std::move(histogram));
+}
+
 void HistogramSnapshotManager::FinishDeltas() {
   DCHECK(preparing_deltas_);
 
@@ -96,7 +105,7 @@
 
 void HistogramSnapshotManager::PrepareSamples(
     const HistogramBase* histogram,
-    scoped_ptr<HistogramSamples> samples) {
+    std::unique_ptr<HistogramSamples> samples) {
   DCHECK(histogram_flattener_);
 
   // Get information known about this histogram.
@@ -117,6 +126,10 @@
     // The checksum should have caught this, so crash separately if it didn't.
     CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
     CHECK(false);  // Crash for the bucket order corruption.
+    // Ensure that the compiler keeps around pointers to |histogram| and its
+    // internal |bucket_ranges_| for any minidumps.
+    base::debug::Alias(
+        static_cast<const Histogram*>(histogram)->bucket_ranges());
   }
   // Checksum corruption might not have caused order corruption.
   CHECK_EQ(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
index d44db19..83bd5fe 100644
--- a/base/metrics/histogram_snapshot_manager.h
+++ b/base/metrics/histogram_snapshot_manager.h
@@ -40,18 +40,28 @@
   // |Histogram::kNoFlags|. Though any "forward" iterator will work, the
   // histograms over which it iterates *must* remain valid until this method
   // returns; the iterator cannot deallocate histograms once it iterates past
-  // them.
+  // them until FinishDeltas() has been called. StartDeltas() must be called
+  // before this method.
+  template <class ForwardHistogramIterator>
+  void PrepareDeltasWithoutStartFinish(ForwardHistogramIterator begin,
+                                       ForwardHistogramIterator end,
+                                       HistogramBase::Flags flags_to_set,
+                                       HistogramBase::Flags required_flags) {
+    for (ForwardHistogramIterator it = begin; it != end; ++it) {
+      (*it)->SetFlags(flags_to_set);
+      if (((*it)->flags() & required_flags) == required_flags)
+        PrepareDelta(*it);
+    }
+  }
+
+  // As above but also calls StartDeltas() and FinishDeltas().
   template <class ForwardHistogramIterator>
   void PrepareDeltas(ForwardHistogramIterator begin,
                      ForwardHistogramIterator end,
                      HistogramBase::Flags flags_to_set,
                      HistogramBase::Flags required_flags) {
     StartDeltas();
-    for (ForwardHistogramIterator it = begin; it != end; ++it) {
-      (*it)->SetFlags(flags_to_set);
-      if (((*it)->flags() & required_flags) == required_flags)
-        PrepareDelta(*it);
-    }
+    PrepareDeltasWithoutStartFinish(begin, end, flags_to_set, required_flags);
     FinishDeltas();
   }
 
@@ -62,14 +72,19 @@
   // until FinishDeltas() completes. PrepareAbsolute() works the same
   // but assumes there were no previous logged values and no future deltas
   // will be created (and thus can work on read-only histograms).
+  // PrepareFinalDelta() works like PrepareDelta() except that it does
+  // not update the previous logged values and can thus be used with
+  // read-only files.
   // Use Prepare*TakingOwnership() if it is desirable to have this class
   // automatically delete the histogram once it is "finished".
   void StartDeltas();
   void PrepareDelta(HistogramBase* histogram);
-  void PrepareDeltaTakingOwnership(scoped_ptr<HistogramBase> histogram);
+  void PrepareDeltaTakingOwnership(std::unique_ptr<HistogramBase> histogram);
   void PrepareAbsolute(const HistogramBase* histogram);
   void PrepareAbsoluteTakingOwnership(
-      scoped_ptr<const HistogramBase> histogram);
+      std::unique_ptr<const HistogramBase> histogram);
+  void PrepareFinalDeltaTakingOwnership(
+      std::unique_ptr<const HistogramBase> histogram);
   void FinishDeltas();
 
  private:
@@ -100,7 +115,7 @@
   // Capture and hold samples from a histogram. This does all the heavy
   // lifting for PrepareDelta() and PrepareAbsolute().
   void PrepareSamples(const HistogramBase* histogram,
-                      scoped_ptr<HistogramSamples> samples);
+                      std::unique_ptr<HistogramSamples> samples);
 
   // Try to detect and fix count inconsistency of logged samples.
   void InspectLoggedSamplesInconsistency(
@@ -113,7 +128,7 @@
 
   // Collection of histograms of which ownership has been passed to this
   // object. They will be deleted by FinishDeltas().
-  std::vector<scoped_ptr<const HistogramBase>> owned_histograms_;
+  std::vector<std::unique_ptr<const HistogramBase>> owned_histograms_;
 
   // Indicates if deltas are currently being prepared.
   bool preparing_deltas_;
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index 03dc7bd..668ac1b 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -9,11 +9,11 @@
 #include <stdint.h>
 
 #include <climits>
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/persistent_histogram_allocator.h"
@@ -55,36 +55,34 @@
   }
 
   void InitializeStatisticsRecorder() {
-    StatisticsRecorder::ResetForTesting();
-    statistics_recorder_ = new StatisticsRecorder();
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_.reset(new StatisticsRecorder());
   }
 
   void UninitializeStatisticsRecorder() {
-    delete statistics_recorder_;
-    statistics_recorder_ = NULL;
+    statistics_recorder_.reset();
   }
 
   void CreatePersistentHistogramAllocator() {
     // By getting the results-histogram before any persistent allocator
     // is attached, that histogram is guaranteed not to be stored in
     // any persistent memory segment (which simplifies some tests).
-    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+    GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
 
-    PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+    GlobalHistogramAllocator::CreateWithLocalMemory(
         kAllocatorMemorySize, 0, "HistogramAllocatorTest");
-    allocator_ =
-        PersistentHistogramAllocator::GetGlobalAllocator()->memory_allocator();
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
   }
 
   void DestroyPersistentHistogramAllocator() {
     allocator_ = nullptr;
-    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+    GlobalHistogramAllocator::ReleaseForTesting();
   }
 
   const bool use_persistent_histogram_allocator_;
 
-  StatisticsRecorder* statistics_recorder_ = nullptr;
-  scoped_ptr<char[]> allocator_memory_;
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<char[]> allocator_memory_;
   PersistentMemoryAllocator* allocator_ = nullptr;
 
  private:
@@ -113,6 +111,14 @@
       "TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
   EXPECT_TRUE(custom_histogram);
 
+  // Macros that create histograms have an internal static variable which will
+  // continue to point to those from the very first run of this method even
+  // during subsequent runs.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
   // Use standard macros (but with fixed samples)
   LOCAL_HISTOGRAM_TIMES("Test2Histogram", TimeDelta::FromDays(1));
   LOCAL_HISTOGRAM_COUNTS("Test3Histogram", 30);
@@ -136,12 +142,12 @@
   HistogramBase* histogram = LinearHistogram::FactoryGet(
       "DuplicatedHistogram", 1, 101, 102, HistogramBase::kNoFlags);
 
-  scoped_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
   EXPECT_EQ(2, samples->TotalCount());
   EXPECT_EQ(2, samples->GetCount(10));
 }
 
-// Check that delta calculations work correct.
+// Check that delta calculations work correctly.
 TEST_P(HistogramTest, DeltaTest) {
   HistogramBase* histogram =
       Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
@@ -150,7 +156,7 @@
   histogram->Add(10);
   histogram->Add(50);
 
-  scoped_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
   EXPECT_EQ(3, samples->TotalCount());
   EXPECT_EQ(1, samples->GetCount(1));
   EXPECT_EQ(1, samples->GetCount(10));
@@ -170,6 +176,32 @@
   EXPECT_EQ(0, samples->TotalCount());
 }
 
+// Check that final-delta calculations work correctly.
+TEST_P(HistogramTest, FinalDeltaTest) {
+  HistogramBase* histogram =
+      Histogram::FactoryGet("FinalDeltaHistogram", 1, 64, 8,
+                            HistogramBase::kNoFlags);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(50);
+
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+
+  histogram->Add(2);
+  histogram->Add(50);
+
+  samples = histogram->SnapshotFinalDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(2));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+}
+
 TEST_P(HistogramTest, ExponentialRangesTest) {
   // Check that we got a nice exponential when there was enough room.
   BucketRanges ranges(9);
@@ -330,7 +362,7 @@
   histogram->AddCount(20, 15);
   histogram->AddCount(30, 14);
 
-  scoped_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
   EXPECT_EQ(29, samples->TotalCount());
   EXPECT_EQ(15, samples->GetCount(20));
   EXPECT_EQ(14, samples->GetCount(30));
@@ -338,7 +370,7 @@
   histogram->AddCount(20, 25);
   histogram->AddCount(30, 24);
 
-  scoped_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
   EXPECT_EQ(78, samples2->TotalCount());
   EXPECT_EQ(40, samples2->GetCount(20));
   EXPECT_EQ(38, samples2->GetCount(30));
@@ -353,7 +385,7 @@
   histogram->AddCount(200000000, 15);
   histogram->AddCount(300000000, 14);
 
-  scoped_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
   EXPECT_EQ(29, samples->TotalCount());
   EXPECT_EQ(15, samples->GetCount(200000000));
   EXPECT_EQ(14, samples->GetCount(300000000));
@@ -361,7 +393,7 @@
   histogram->AddCount(200000000, 25);
   histogram->AddCount(300000000, 24);
 
-  scoped_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
   EXPECT_EQ(78, samples2->TotalCount());
   EXPECT_EQ(40, samples2->GetCount(200000000));
   EXPECT_EQ(38, samples2->GetCount(300000000));
@@ -383,7 +415,7 @@
   histogram->Add(10000);
 
   // Verify they landed in the underflow, and overflow buckets.
-  scoped_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+  std::unique_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
   EXPECT_EQ(2, samples->GetCountAtIndex(0));
   EXPECT_EQ(0, samples->GetCountAtIndex(1));
   size_t array_size = histogram->bucket_count();
@@ -407,7 +439,7 @@
   test_custom_histogram->Add(INT_MAX);
 
   // Verify they landed in the underflow, and overflow buckets.
-  scoped_ptr<SampleVector> custom_samples =
+  std::unique_ptr<SampleVector> custom_samples =
       test_custom_histogram->SnapshotSampleVector();
   EXPECT_EQ(2, custom_samples->GetCountAtIndex(0));
   EXPECT_EQ(0, custom_samples->GetCountAtIndex(1));
@@ -431,12 +463,19 @@
   }
 
   // Check to see that the bucket counts reflect our additions.
-  scoped_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+  std::unique_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
   for (int i = 0; i < 8; i++)
     EXPECT_EQ(i + 1, samples->GetCountAtIndex(i));
 }
 
 TEST_P(HistogramTest, CorruptSampleCounts) {
+  // The internal code creates histograms via macros and thus keeps static
+  // pointers to them. If those pointers are to persistent memory which will
+  // be freed, then any following calls to that code will crash with a
+  // segmentation violation.
+  if (use_persistent_histogram_allocator_)
+    return;
+
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
 
@@ -444,7 +483,7 @@
   histogram->Add(20);
   histogram->Add(40);
 
-  scoped_ptr<SampleVector> snapshot = histogram->SnapshotSampleVector();
+  std::unique_ptr<SampleVector> snapshot = histogram->SnapshotSampleVector();
   EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
             histogram->FindCorruption(*snapshot));
   EXPECT_EQ(2, snapshot->redundant_count());
@@ -467,7 +506,7 @@
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
 
-  scoped_ptr<HistogramSamples> snapshot = histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotSamples();
   EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
             histogram->FindCorruption(*snapshot));
 
@@ -615,7 +654,7 @@
   // Calculate cost of creating histograms.
   TimeTicks create_start = TimeTicks::Now();
   for (int i = 0; i < kTestCreateCount; ++i) {
-    Histogram::FactoryGet(histogram_names[i], 0, 100, 10,
+    Histogram::FactoryGet(histogram_names[i], 1, 100, 10,
                           HistogramBase::kNoFlags);
   }
   TimeDelta create_ticks = TimeTicks::Now() - create_start;
@@ -635,7 +674,7 @@
     const int i_mult = 6007;
     static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
     int index = (i * i_mult) & (kTestCreateCount - 1);
-    Histogram::FactoryGet(histogram_names[index], 0, 100, 10,
+    Histogram::FactoryGet(histogram_names[index], 1, 100, 10,
                           HistogramBase::kNoFlags);
   }
   TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
@@ -648,7 +687,7 @@
 
   // Calculate cost of accessing histograms.
   HistogramBase* histogram = Histogram::FactoryGet(
-      histogram_names[0], 0, 100, 10, HistogramBase::kNoFlags);
+      histogram_names[0], 1, 100, 10, HistogramBase::kNoFlags);
   ASSERT_TRUE(histogram);
   TimeTicks add_start = TimeTicks::Now();
   for (int i = 0; i < kTestAddCount; ++i)
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 6006d31..9608fba 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -4,12 +4,16 @@
 
 #include "base/metrics/persistent_histogram_allocator.h"
 
+#include <memory>
+
+#include "base/files/important_file_writer.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_sample_map.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/synchronization/lock.h"
@@ -39,20 +43,20 @@
 // The current globally-active persistent allocator for all new histograms.
 // The object held here will obviously not be destructed at process exit
 // but that's best since PersistentMemoryAllocator objects (that underlie
-// PersistentHistogramAllocator objects) are explicitly forbidden from doing
+// GlobalHistogramAllocator objects) are explicitly forbidden from doing
 // anything essential at exit anyway due to the fact that they depend on data
 // managed elsewhere and which could be destructed first.
-PersistentHistogramAllocator* g_allocator;
+GlobalHistogramAllocator* g_allocator = nullptr;
 
 // Take an array of range boundaries and create a proper BucketRanges object
 // which is returned to the caller. A return of nullptr indicates that the
 // passed boundaries are invalid.
-scoped_ptr<BucketRanges> CreateRangesFromData(
+std::unique_ptr<BucketRanges> CreateRangesFromData(
     HistogramBase::Sample* ranges_data,
     uint32_t ranges_checksum,
     size_t count) {
   // To avoid racy destruction at shutdown, the following may be leaked.
-  scoped_ptr<BucketRanges> ranges(new BucketRanges(count));
+  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
   DCHECK_EQ(count, ranges->size());
   for (size_t i = 0; i < count; ++i) {
     if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
@@ -89,6 +93,134 @@
   "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
 };
 
+
+PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
+    PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), record_iterator_(allocator) {}
+
+PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() {}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
+                                                          const void* user) {
+  base::AutoLock auto_lock(lock_);
+  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
+}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
+    uint64_t id) {
+  lock_.AssertAcquired();
+
+  auto found = sample_records_.find(id);
+  if (found != sample_records_.end())
+    return found->second.get();
+
+  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
+  samples = WrapUnique(new PersistentSampleMapRecords(this, id));
+  return samples.get();
+}
+
+bool PersistentSparseHistogramDataManager::LoadRecords(
+    PersistentSampleMapRecords* sample_map_records) {
+  // DataManager must be locked in order to access the found_ field of any
+  // PersistentSampleMapRecords object.
+  base::AutoLock auto_lock(lock_);
+  bool found = false;
+
+  // If there are already "found" entries for the passed object, move them.
+  if (!sample_map_records->found_.empty()) {
+    sample_map_records->records_.reserve(sample_map_records->records_.size() +
+                                         sample_map_records->found_.size());
+    sample_map_records->records_.insert(sample_map_records->records_.end(),
+                                        sample_map_records->found_.begin(),
+                                        sample_map_records->found_.end());
+    sample_map_records->found_.clear();
+    found = true;
+  }
+
+  // Acquiring a lock is a semi-expensive operation so load some records with
+  // each call. More than this number may be loaded if it takes longer to
+  // find at least one matching record for the passed object.
+  const int kMinimumNumberToLoad = 10;
+  const uint64_t match_id = sample_map_records->sample_map_id_;
+
+  // Loop while no entry is found OR we haven't yet loaded the minimum number.
+  // This will continue reading even after a match is found.
+  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
+    // Get the next sample-record. The iterator will always resume from where
+    // it left off even if it previously had nothing further to return.
+    uint64_t found_id;
+    PersistentMemoryAllocator::Reference ref =
+        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
+                                                     &found_id);
+
+    // Stop immediately if there are none.
+    if (!ref)
+      break;
+
+    // The sample-record could be for any sparse histogram. Add the reference
+    // to the appropriate collection for later use.
+    if (found_id == match_id) {
+      sample_map_records->records_.push_back(ref);
+      found = true;
+    } else {
+      PersistentSampleMapRecords* samples =
+          GetSampleMapRecordsWhileLocked(found_id);
+      DCHECK(samples);
+      samples->found_.push_back(ref);
+    }
+  }
+
+  return found;
+}
+
+
+PersistentSampleMapRecords::PersistentSampleMapRecords(
+    PersistentSparseHistogramDataManager* data_manager,
+    uint64_t sample_map_id)
+    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}
+
+PersistentSampleMapRecords::~PersistentSampleMapRecords() {}
+
+PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
+    const void* user) {
+  DCHECK(!user_);
+  user_ = user;
+  seen_ = 0;
+  return this;
+}
+
+void PersistentSampleMapRecords::Release(const void* user) {
+  DCHECK_EQ(user_, user);
+  user_ = nullptr;
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
+  DCHECK(user_);
+
+  // If there are no unseen records, lock and swap in all the found ones.
+  if (records_.size() == seen_) {
+    if (!data_manager_->LoadRecords(this))
+      return false;
+  }
+
+  // Return the next record. Records *must* be returned in the same order
+  // they are found in the persistent memory in order to ensure that all
+  // objects using this data always have the same state. Race conditions
+  // can cause duplicate records so using the "first found" is the only
+  // guarantee that all objects always access the same one.
+  DCHECK_LT(seen_, records_.size());
+  return records_[seen_++];
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
+    HistogramBase::Sample value) {
+  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
+                                                     sample_map_id_, value);
+}
+
+
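For orientation, the consumer pattern expected by the classes added above looks roughly like this; |manager| wraps a live PersistentMemoryAllocator and |user| is whatever object claims exclusive use of the records (typically a PersistentSampleMap), both placeholders:

    PersistentSampleMapRecords* records =
        manager.UseSampleMapRecords(/*id=*/0x1234, user);
    PersistentMemoryAllocator::Reference ref;
    while ((ref = records->GetNext()) != 0) {
      // Resolve |ref| through the allocator, e.g. via GetAsObject<>().
    }
    records->Release(user);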
 // This data will be held in persistent memory in order for processes to
 // locate and use histograms created elsewhere.
 struct PersistentHistogramAllocator::PersistentHistogramData {
@@ -109,16 +241,28 @@
   char name[1];
 };
 
+PersistentHistogramAllocator::Iterator::Iterator(
+    PersistentHistogramAllocator* allocator)
+    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}
+
+std::unique_ptr<HistogramBase>
+PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
+    if (ref != ignore)
+      return allocator_->GetHistogram(ref);
+  }
+  return nullptr;
+}
+
+
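The new nested Iterator replaces the CreateIterator()/GetNextHistogramWithIgnore() pair removed later in this file. A hedged sketch of walking every histogram in an allocator, with |allocator| assumed to be a live PersistentHistogramAllocator:

    PersistentHistogramAllocator::Iterator iter(allocator);
    std::unique_ptr<HistogramBase> histogram;
    while ((histogram = iter.GetNextWithIgnore(/*ignore=*/0)) != nullptr) {
      // Inspect or register |histogram|; valid references are never 0, so an
      // |ignore| of 0 skips nothing.
    }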
 PersistentHistogramAllocator::PersistentHistogramAllocator(
-    scoped_ptr<PersistentMemoryAllocator> memory)
-    : memory_allocator_(std::move(memory)) {}
+    std::unique_ptr<PersistentMemoryAllocator> memory)
+    : memory_allocator_(std::move(memory)),
+      sparse_histogram_data_manager_(memory_allocator_.get()) {}
 
 PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
 
-void PersistentHistogramAllocator::CreateIterator(Iterator* iter) {
-  memory_allocator_->CreateIterator(&iter->memory_iter);
-}
-
 void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
   memory_allocator_->CreateTrackingHistograms(name);
 }
@@ -127,6 +271,10 @@
   memory_allocator_->UpdateTrackingHistograms();
 }
 
+void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
+  subtle::NoBarrier_Store(&last_created_, 0);
+}
+
 // static
 HistogramBase*
 PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
@@ -148,9 +296,14 @@
     if (!initialized) {
       initialized = true;
       if (g_allocator) {
+// Don't log in release-with-asserts builds; otherwise the test_installer step
+// fails because this code writes to a log file before the installer code had a
+// chance to set the log file's location.
+#if !defined(DCHECK_ALWAYS_ON)
         DLOG(WARNING) << "Creating the results-histogram inside persistent"
                       << " memory can cause future allocations to crash if"
                       << " that memory is ever released (for testing).";
+#endif
       }
 
       histogram_pointer = LinearHistogram::FactoryGet(
@@ -173,103 +326,7 @@
 }
 
 // static
-void PersistentHistogramAllocator::SetGlobalAllocator(
-    scoped_ptr<PersistentHistogramAllocator> allocator) {
-  // Releasing or changing an allocator is extremely dangerous because it
-  // likely has histograms stored within it. If the backing memory is also
-  // also released, future accesses to those histograms will seg-fault.
-  CHECK(!g_allocator);
-  g_allocator = allocator.release();
-
-  size_t existing = StatisticsRecorder::GetHistogramCount();
-  DLOG_IF(WARNING, existing)
-      << existing
-      << " histograms were created before persistence was enabled.";
-}
-
-// static
-PersistentHistogramAllocator*
-PersistentHistogramAllocator::GetGlobalAllocator() {
-  return g_allocator;
-}
-
-// static
-scoped_ptr<PersistentHistogramAllocator>
-PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting() {
-  PersistentHistogramAllocator* histogram_allocator = g_allocator;
-  if (!histogram_allocator)
-    return nullptr;
-  PersistentMemoryAllocator* memory_allocator =
-      histogram_allocator->memory_allocator();
-
-  // Before releasing the memory, it's necessary to have the Statistics-
-  // Recorder forget about the histograms contained therein; otherwise,
-  // some operations will try to access them and the released memory.
-  PersistentMemoryAllocator::Iterator iter;
-  PersistentMemoryAllocator::Reference ref;
-  uint32_t type_id;
-  memory_allocator->CreateIterator(&iter);
-  while ((ref = memory_allocator->GetNextIterable(&iter, &type_id)) != 0) {
-    if (type_id == kTypeIdHistogram) {
-      PersistentHistogramData* histogram_data =
-          memory_allocator->GetAsObject<PersistentHistogramData>(
-              ref, kTypeIdHistogram);
-      DCHECK(histogram_data);
-      StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
-
-      // If a test breaks here then a memory region containing a histogram
-      // actively used by this code is being released back to the test.
-      // If that memory segment were to be deleted, future calls to create
-      // persistent histograms would crash. To avoid this, have the test call
-      // the method GetCreateHistogramResultHistogram() *before* setting
-      // the (temporary) memory allocator via SetGlobalAllocator() so that
-      // histogram is instead allocated from the process heap.
-      DCHECK_NE(kResultHistogram, histogram_data->name);
-    }
-  }
-
-  g_allocator = nullptr;
-  return make_scoped_ptr(histogram_allocator);
-};
-
-// static
-void PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory(
-    void* base,
-    size_t size,
-    size_t page_size,
-    uint64_t id,
-    StringPiece name) {
-  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
-      make_scoped_ptr(new PersistentMemoryAllocator(
-          base, size, page_size, id, name, false)))));
-}
-
-// static
-void PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
-    size_t size,
-    uint64_t id,
-    StringPiece name) {
-  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
-      make_scoped_ptr(new LocalPersistentMemoryAllocator(size, id, name)))));
-}
-
-// static
-void PersistentHistogramAllocator::CreateGlobalAllocatorOnSharedMemory(
-    size_t size,
-    const SharedMemoryHandle& handle) {
-  scoped_ptr<SharedMemory> shm(new SharedMemory(handle, /*readonly=*/false));
-  if (!shm->Map(size)) {
-    NOTREACHED();
-    return;
-  }
-
-  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
-      make_scoped_ptr(new SharedPersistentMemoryAllocator(
-          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
-}
-
-// static
-scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
     PersistentHistogramData* histogram_data_ptr) {
   if (!histogram_data_ptr) {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
@@ -279,10 +336,10 @@
 
   // Sparse histograms are quite different so handle them as a special case.
   if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
-    scoped_ptr<HistogramBase> histogram = SparseHistogram::PersistentCreate(
-        memory_allocator(), histogram_data_ptr->name,
-        &histogram_data_ptr->samples_metadata,
-        &histogram_data_ptr->logged_metadata);
+    std::unique_ptr<HistogramBase> histogram =
+        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
+                                          &histogram_data_ptr->samples_metadata,
+                                          &histogram_data_ptr->logged_metadata);
     DCHECK(histogram);
     histogram->SetFlags(histogram_data_ptr->flags);
     RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
@@ -314,7 +371,7 @@
     return nullptr;
   }
 
-  scoped_ptr<const BucketRanges> created_ranges =
+  std::unique_ptr<const BucketRanges> created_ranges =
       CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
                            histogram_data.bucket_count + 1);
   if (!created_ranges) {
@@ -346,7 +403,7 @@
       counts_data + histogram_data.bucket_count;
 
   std::string name(histogram_data_ptr->name);
-  scoped_ptr<HistogramBase> histogram;
+  std::unique_ptr<HistogramBase> histogram;
   switch (histogram_data.histogram_type) {
     case HISTOGRAM:
       histogram = Histogram::PersistentCreate(
@@ -393,7 +450,7 @@
   return histogram;
 }
 
-scoped_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
     Reference ref) {
   // Unfortunately, the histogram "pickle" methods cannot be used as part of
   // the persistence because the deserialization methods always create local
@@ -413,21 +470,6 @@
   return CreateHistogram(histogram_data);
 }
 
-scoped_ptr<HistogramBase>
-PersistentHistogramAllocator::GetNextHistogramWithIgnore(Iterator* iter,
-                                                         Reference ignore) {
-  PersistentMemoryAllocator::Reference ref;
-  uint32_t type_id;
-  while ((ref = memory_allocator_->GetNextIterable(&iter->memory_iter,
-                                                   &type_id)) != 0) {
-    if (ref == ignore)
-      continue;
-    if (type_id == kTypeIdHistogram)
-      return GetHistogram(ref);
-  }
-  return nullptr;
-}
-
 void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                      bool registered) {
   // If the created persistent histogram was registered then it needs to
@@ -441,7 +483,13 @@
     memory_allocator_->SetType(ref, 0);
 }
 
-scoped_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
+PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
+    uint64_t id,
+    const void* user) {
+  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
+}
+
+std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
     HistogramType histogram_type,
     const std::string& name,
     int minimum,
@@ -521,14 +569,15 @@
     // using what is already known above but avoids duplicating the switch
     // statement here and serves as a double-check that everything is
     // correct before committing the new histogram to persistent space.
-    scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
+    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
     DCHECK(histogram);
     if (ref_ptr != nullptr)
       *ref_ptr = histogram_ref;
 
     // By storing the reference within the allocator to this histogram, the
     // next import (which will happen before the next histogram creation)
-    // will know to skip it. See also the comment in ImportGlobalHistograms().
+    // will know to skip it.
+    // See also the comment in ImportHistogramsToStatisticsRecorder().
     subtle::NoBarrier_Store(&last_created_, histogram_ref);
     return histogram;
   }
@@ -548,39 +597,170 @@
   return nullptr;
 }
 
+GlobalHistogramAllocator::~GlobalHistogramAllocator() {}
+
 // static
-void PersistentHistogramAllocator::ImportGlobalHistograms() {
-  // The lock protects against concurrent access to the iterator and is created
-  // in a thread-safe manner when needed.
-  static base::LazyInstance<base::Lock>::Leaky lock = LAZY_INSTANCE_INITIALIZER;
+void GlobalHistogramAllocator::CreateWithPersistentMemory(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    StringPiece name) {
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new PersistentMemoryAllocator(
+          base, size, page_size, id, name, false)))));
+}
 
-  if (g_allocator) {
-    // TODO(bcwhite): Investigate a lock-free, thread-safe iterator.
-    base::AutoLock auto_lock(lock.Get());
+// static
+void GlobalHistogramAllocator::CreateWithLocalMemory(
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)))));
+}
 
-    // Each call resumes from where it last left off so a persistant iterator
-    // is needed. This class has a constructor so even the definition has to
-    // be protected by the lock in order to be thread-safe.
-    static Iterator iter;
-    if (iter.is_clear())
-      g_allocator->CreateIterator(&iter);
+// static
+void GlobalHistogramAllocator::CreateWithSharedMemory(
+    std::unique_ptr<SharedMemory> memory,
+    size_t size,
+    uint64_t /*id*/,
+    StringPiece /*name*/) {
+  if ((!memory->memory() && !memory->Map(size)) ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*memory)) {
+    NOTREACHED();
+    return;
+  }
 
-    // Skip the import if it's the histogram that was last created. Should a
-    // race condition cause the "last created" to be overwritten before it
-    // is recognized here then the histogram will be created and be ignored
-    // when it is detected as a duplicate by the statistics-recorder. This
-    // simple check reduces the time of creating persistent histograms by
-    // about 40%.
-    Reference last_created =
-        subtle::NoBarrier_Load(&g_allocator->last_created_);
+  DCHECK_LE(memory->mapped_size(), size);
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new SharedPersistentMemoryAllocator(
+          std::move(memory), 0, StringPiece(), /*readonly=*/false)))));
+}
 
-    while (true) {
-      scoped_ptr<HistogramBase> histogram =
-          g_allocator->GetNextHistogramWithIgnore(&iter, last_created);
-      if (!histogram)
-        break;
-      StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
-    }
+// static
+void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t size) {
+  std::unique_ptr<SharedMemory> shm(
+      new SharedMemory(handle, /*readonly=*/false));
+  if (!shm->Map(size) ||
+      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
+    NOTREACHED();
+    return;
+  }
+
+  Set(WrapUnique(new GlobalHistogramAllocator(
+      WrapUnique(new SharedPersistentMemoryAllocator(
+          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
+}
+
+// static
+void GlobalHistogramAllocator::Set(
+    std::unique_ptr<GlobalHistogramAllocator> allocator) {
+  // Releasing or changing an allocator is extremely dangerous because it
+  // likely has histograms stored within it. If the backing memory is also
+  // released, future accesses to those histograms will seg-fault.
+  CHECK(!g_allocator);
+  g_allocator = allocator.release();
+  size_t existing = StatisticsRecorder::GetHistogramCount();
+
+  DVLOG_IF(1, existing)
+      << existing << " histograms were created before persistence was enabled.";
+}
+
+// static
+GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
+  return g_allocator;
+}
+
+// static
+std::unique_ptr<GlobalHistogramAllocator>
+GlobalHistogramAllocator::ReleaseForTesting() {
+  GlobalHistogramAllocator* histogram_allocator = g_allocator;
+  if (!histogram_allocator)
+    return nullptr;
+  PersistentMemoryAllocator* memory_allocator =
+      histogram_allocator->memory_allocator();
+
+  // Before releasing the memory, it's necessary to have the Statistics-
+  // Recorder forget about the histograms contained therein; otherwise,
+  // some operations will try to access them and the released memory.
+  PersistentMemoryAllocator::Iterator iter(memory_allocator);
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) {
+    PersistentHistogramData* histogram_data =
+        memory_allocator->GetAsObject<PersistentHistogramData>(
+            ref, kTypeIdHistogram);
+    DCHECK(histogram_data);
+    StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
+
+    // If a test breaks here then a memory region containing a histogram
+    // actively used by this code is being released back to the test.
+    // If that memory segment were to be deleted, future calls to create
+    // persistent histograms would crash. To avoid this, have the test call
+    // the method GetCreateHistogramResultHistogram() *before* setting
+    // the (temporary) memory allocator via SetGlobalAllocator() so that
+    // histogram is instead allocated from the process heap.
+    DCHECK_NE(kResultHistogram, histogram_data->name);
+  }
+
+  g_allocator = nullptr;
+  return WrapUnique(histogram_allocator);
+};
+
+void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
+  persistent_location_ = location;
+}
+
+bool GlobalHistogramAllocator::WriteToPersistentLocation() {
+#if defined(OS_NACL)
+  // NACL doesn't support file operations, including ImportantFileWriter.
+  NOTREACHED();
+  return false;
+#else
+  // Stop if no destination is set.
+  if (persistent_location_.empty()) {
+    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
+                 << " to file because no location was set.";
+    return false;
+  }
+
+  StringPiece contents(static_cast<const char*>(data()), used());
+  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
+                                                contents)) {
+    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
+               << " to file: " << persistent_location_.value();
+    return false;
+  }
+
+  return true;
+#endif
+}
+
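A brief sketch of the new persistence-to-disk hooks; |path| is any writable FilePath chosen by the embedder and is illustrative only:

    GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
    if (allocator) {
      allocator->SetPersistentLocation(path);
      if (!allocator->WriteToPersistentLocation()) {
        // Returns false on NaCl, when no location has been set, or when the
        // atomic file write fails.
      }
    }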
+GlobalHistogramAllocator::GlobalHistogramAllocator(
+    std::unique_ptr<PersistentMemoryAllocator> memory)
+    : PersistentHistogramAllocator(std::move(memory)),
+      import_iterator_(this) {}
+
+void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
+  // Skip the import if it's the histogram that was last created. Should a
+  // race condition cause the "last created" to be overwritten before it
+  // is recognized here then the histogram will be created and be ignored
+  // when it is detected as a duplicate by the statistics-recorder. This
+  // simple check reduces the time of creating persistent histograms by
+  // about 40%.
+  Reference record_to_ignore = last_created();
+
+  // There is no lock on this because the iterator is lock-free while still
+  // guaranteed to return each entry only once. The StatisticsRecorder
+  // has its own lock so the Register operation is safe.
+  while (true) {
+    std::unique_ptr<HistogramBase> histogram =
+        import_iterator_.GetNextWithIgnore(record_to_ignore);
+    if (!histogram)
+      break;
+    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
   }
 }
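Putting the renamed entry points together, the lifecycle exercised by the updated tests looks roughly like this; the size and names are illustrative:

    // Create the results histogram first so it lives on the heap rather than
    // in the soon-to-be-released persistent segment (see ReleaseForTesting()).
    GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
    GlobalHistogramAllocator::CreateWithLocalMemory(
        64 << 10, /*id=*/0, "ExampleAllocator");

    HistogramBase* histogram = Histogram::FactoryGet(
        "Example.Persistent", 1, 100, 10, HistogramBase::kNoFlags);
    histogram->Add(3);  // The sample now lives in the persistent segment.

    // Test teardown.
    std::unique_ptr<GlobalHistogramAllocator> released =
        GlobalHistogramAllocator::ReleaseForTesting();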
 
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index cc8d023..8df45f2 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -5,41 +5,209 @@
 #ifndef BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
 #define BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
 
+#include <map>
+#include <memory>
+
 #include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/feature_list.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/shared_memory.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/persistent_memory_allocator.h"
 #include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
 
 namespace base {
 
+class PersistentSampleMapRecords;
+class PersistentSparseHistogramDataManager;
+
 // Feature definition for enabling histogram persistence.
 BASE_EXPORT extern const Feature kPersistentHistogramsFeature;
 
+
+// A data manager for sparse histograms so each instance of such doesn't have
+// to separately iterate over the entire memory segment. Though this class
+// will generally be accessed through the PersistentHistogramAllocator below,
+// it can be used independently on any PersistentMemoryAllocator (making it
+// usable for testing). This object supports only one instance of a sparse
+// histogram for a given id. Tests that create multiple identical histograms,
+// perhaps to simulate multiple processes, should create a separate manager
+// for each.
+class BASE_EXPORT PersistentSparseHistogramDataManager {
+ public:
+  // Constructs the data manager. The allocator must live longer than any
+  // managers that reference it.
+  explicit PersistentSparseHistogramDataManager(
+      PersistentMemoryAllocator* allocator);
+
+  ~PersistentSparseHistogramDataManager();
+
+  // Returns the object that manages the persistent-sample-map records for a
+  // given |id|. Only one |user| of this data is allowed at a time. This does
+  // an automatic Acquire() on the records. The user must call Release() on
+  // the returned object when it is finished with it. Ownership of the records
+  // object stays with this manager.
+  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
+                                                  const void* user);
+
+  // Convenience method that gets the object for a given reference so callers
+  // don't have to also keep their own pointer to the appropriate allocator.
+  template <typename T>
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
+    return allocator_->GetAsObject<T>(ref, type_id);
+  }
+
+ private:
+  friend class PersistentSampleMapRecords;
+
+  // Gets the object holding records for a given sample-map id when |lock_|
+  // has already been acquired.
+  PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id);
+
+  // Loads sample-map records looking for those belonging to the specified
+  // |load_id|. Records found for other sample-maps are held for later use
+  // without having to iterate again. This should be called only from a
+  // PersistentSampleMapRecords object because those objects have a contract
+  // that there are no other threads accessing the internal records_ field
+  // of the object that is passed in.
+  bool LoadRecords(PersistentSampleMapRecords* sample_map_records);
+
+  // Weak-pointer to the allocator used by the sparse histograms.
+  PersistentMemoryAllocator* allocator_;
+
+  // Iterator within the allocator for finding sample records.
+  PersistentMemoryAllocator::Iterator record_iterator_;
+
+  // Mapping of sample-map IDs to their sample records.
+  std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
+      sample_records_;
+
+  // A lock used for synchronizing changes to sample_records_.
+  base::Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSparseHistogramDataManager);
+};
+
+
+// This class manages sample-records used by a PersistentSampleMap container
+// that underlies a persistent SparseHistogram object. It is broken out into a
+// top-level class so that it can be forward-declared in other header files
+// rather than include this entire file as would be necessary if it were
+// declared within the PersistentSparseHistogramDataManager class above.
+class BASE_EXPORT PersistentSampleMapRecords {
+ public:
+  // Constructs an instance of this class. The manager object must live longer
+  // than all instances of this class that reference it, which is not usually
+  // a problem since these objects are generally managed from within that
+  // manager instance.
+  PersistentSampleMapRecords(PersistentSparseHistogramDataManager* data_manager,
+                             uint64_t sample_map_id);
+
+  ~PersistentSampleMapRecords();
+
+  // Resets the internal state for a new object using this data. The return
+  // value is "this" as a convenience.
+  PersistentSampleMapRecords* Acquire(const void* user);
+
+  // Indicates that the using object is done with this data.
+  void Release(const void* user);
+
+  // Gets the next reference to a persistent sample-map record. The type and
+  // layout of the data being referenced is defined entirely within the
+  // PersistentSampleMap class.
+  PersistentMemoryAllocator::Reference GetNext();
+
+  // Creates a new persistent sample-map record for sample |value| and returns
+  // a reference to it.
+  PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value);
+
+  // Convenience method that gets the object for a given reference so callers
+  // don't have to also keep their own pointer to the appropriate allocator.
+  // This is expected to be used with the SampleRecord structure defined inside
+  // the persistent_sample_map.cc file but since that isn't exported (for
+  // cleanliness of the interface), a template is defined that will be
+  // resolved when used inside that file.
+  template <typename T>
+  T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
+    return data_manager_->GetAsObject<T>(ref, type_id);
+  }
+
+ private:
+  friend PersistentSparseHistogramDataManager;
+
+  // Weak-pointer to the parent data-manager object.
+  PersistentSparseHistogramDataManager* data_manager_;
+
+  // ID of PersistentSampleMap to which these records apply.
+  const uint64_t sample_map_id_;
+
+  // The current user of this set of records. It is used to ensure that no
+  // more than one object is using these records at a given time.
+  const void* user_ = nullptr;
+
+  // This is the count of how many "records" have already been read by the
+  // owning sample-map.
+  size_t seen_ = 0;
+
+  // This is the set of records previously found for a sample map. Because
+  // there is ever only one object with a given ID (typically a hash of a
+  // histogram name) and because the parent SparseHistogram has acquired
+  // its own lock before accessing the PersistentSampleMap it controls, this
+  // list can be accessed without acquiring any additional lock.
+  std::vector<PersistentMemoryAllocator::Reference> records_;
+
+  // This is the set of records found during iteration through memory. It
+  // is appended in bulk to "records". Access to this vector can be done
+  // only while holding the parent manager's lock.
+  std::vector<PersistentMemoryAllocator::Reference> found_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMapRecords);
+};
+
+
 // This class manages histograms created within a PersistentMemoryAllocator.
 class BASE_EXPORT PersistentHistogramAllocator {
  public:
-  // This iterator is used for fetching persistent histograms from an allocator.
-  class Iterator {
+  // A reference to a histogram. While this is implemented as PMA::Reference,
+  // it is not conceptually the same thing. Outside callers should always use
+  // a Reference matching the class it is for and not mix the two.
+  using Reference = PersistentMemoryAllocator::Reference;
+
+  // Iterator used for fetching persistent histograms from an allocator.
+  // It is lock-free and thread-safe.
+  // See PersistentMemoryAllocator::Iterator for more information.
+  class BASE_EXPORT Iterator {
    public:
-    bool is_clear() { return memory_iter.is_clear(); }
+    // Constructs an iterator on a given |allocator|, starting at the beginning.
+    // The allocator must live beyond the lifetime of the iterator.
+    explicit Iterator(PersistentHistogramAllocator* allocator);
+
+    // Gets the next histogram from persistent memory; returns null if there
+    // are no more histograms to be found. This may still be called again
+    // later to retrieve any new histograms added in the meantime.
+    std::unique_ptr<HistogramBase> GetNext() { return GetNextWithIgnore(0); }
+
+    // Gets the next histogram from persistent memory, ignoring one particular
+    // reference in the process. Pass |ignore| of zero (0) to ignore nothing.
+    std::unique_ptr<HistogramBase> GetNextWithIgnore(Reference ignore);
 
    private:
-    friend class PersistentHistogramAllocator;
+    // Weak-pointer to histogram allocator being iterated over.
+    PersistentHistogramAllocator* allocator_;
 
-    // The iterator used for stepping through persistent memory iterables.
-    PersistentMemoryAllocator::Iterator memory_iter;
+    // The iterator used for stepping through objects in persistent memory.
+    // It is lock-free and thread-safe which is why this class is also such.
+    PersistentMemoryAllocator::Iterator memory_iter_;
+
+    DISALLOW_COPY_AND_ASSIGN(Iterator);
   };
 
-  using Reference = PersistentMemoryAllocator::Reference;
-
   // A PersistentHistogramAllocator is constructed from a PersistentMemory-
   // Allocator object of which it takes ownership.
-  PersistentHistogramAllocator(scoped_ptr<PersistentMemoryAllocator> memory);
-  ~PersistentHistogramAllocator();
+  explicit PersistentHistogramAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> memory);
+  virtual ~PersistentHistogramAllocator();
 
   // Direct access to underlying memory allocator. If the segment is shared
   // across threads or processes, reading data through these values does
@@ -54,6 +222,7 @@
   const char* Name() const { return memory_allocator_->Name(); }
   const void* data() const { return memory_allocator_->data(); }
   size_t length() const { return memory_allocator_->length(); }
+  size_t size() const { return memory_allocator_->size(); }
   size_t used() const { return memory_allocator_->used(); }
 
   // Recreate a Histogram from data held in persistent memory. Though this
@@ -61,19 +230,11 @@
   // shared with all other threads referencing it. This method takes a |ref|
   // to where the top-level histogram data may be found in this allocator.
   // This method will return null if any problem is detected with the data.
-  scoped_ptr<HistogramBase> GetHistogram(Reference ref);
-
-  // Get the next histogram in persistent data based on iterator.
-  scoped_ptr<HistogramBase> GetNextHistogram(Iterator* iter) {
-    return GetNextHistogramWithIgnore(iter, 0);
-  }
-
-  // Create an iterator for going through all histograms in an allocator.
-  void CreateIterator(Iterator* iter);
+  std::unique_ptr<HistogramBase> GetHistogram(Reference ref);
 
   // Allocate a new persistent histogram. The returned histogram will not
   // be able to be located by other allocators until it is "finalized".
-  scoped_ptr<HistogramBase> AllocateHistogram(
+  std::unique_ptr<HistogramBase> AllocateHistogram(
       HistogramType histogram_type,
       const std::string& name,
       int minimum,
@@ -87,6 +248,14 @@
   // True, forgetting it otherwise.
   void FinalizeHistogram(Reference ref, bool registered);
 
+  // Returns the object that manages the persistent-sample-map records for a
+  // given |id|. Only one |user| of this data is allowed at a time. This does
+  // an automatic Acquire() on the records. The user must call Release() on
+  // the returned object when it is finished with it. Ownership stays with
+  // this allocator.
+  PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
+                                                  const void* user);
+
   // Create internal histograms for tracking memory use and allocation sizes
   // for allocator of |name| (which can simply be the result of Name()). This
   // is done separately from construction for situations such as when the
@@ -99,50 +268,29 @@
   void CreateTrackingHistograms(StringPiece name);
   void UpdateTrackingHistograms();
 
-  // Manage a PersistentHistogramAllocator for globally storing histograms in
-  // a space that can be persisted or shared between processes. There is only
-  // ever one allocator for all such histograms created by a single process.
-  // This takes ownership of the object and should be called as soon as
-  // possible during startup to capture as many histograms as possible and
-  // while operating single-threaded so there are no race-conditions.
-  static void SetGlobalAllocator(
-      scoped_ptr<PersistentHistogramAllocator> allocator);
-  static PersistentHistogramAllocator* GetGlobalAllocator();
-
-  // This access to the persistent allocator is only for testing; it extracts
-  // the current allocator completely. This allows easy creation of histograms
-  // within persistent memory segments which can then be extracted and used
-  // in other ways.
-  static scoped_ptr<PersistentHistogramAllocator>
-  ReleaseGlobalAllocatorForTesting();
-
-  // These helper methods perform SetGlobalAllocator() calls with allocators
-  // of the specified type and parameters.
-  static void CreateGlobalAllocatorOnPersistentMemory(
-      void* base,
-      size_t size,
-      size_t page_size,
-      uint64_t id,
-      StringPiece name);
-  static void CreateGlobalAllocatorOnLocalMemory(
-      size_t size,
-      uint64_t id,
-      StringPiece name);
-  static void CreateGlobalAllocatorOnSharedMemory(
-      size_t size,
-      const SharedMemoryHandle& handle);
-
-  // Import new histograms from the global PersistentHistogramAllocator. It's
-  // possible for other processes to create histograms in the active memory
-  // segment; this adds those to the internal list of known histograms to
-  // avoid creating duplicates that would have to be merged during reporting.
-  // Every call to this method resumes from the last entry it saw; it costs
-  // nothing if nothing new has been added.
-  static void ImportGlobalHistograms();
+  // Clears the internal |last_created_| reference so testing can validate
+  // operation without that optimization.
+  void ClearLastCreatedReferenceForTesting();
 
   // Histogram containing creation results. Visible for testing.
   static HistogramBase* GetCreateHistogramResultHistogram();
 
+ protected:
+  // The structure used to hold histogram data in persistent memory. It is
+  // defined and used entirely within the .cc file.
+  struct PersistentHistogramData;
+
+  // Gets the reference of the last histogram created, used to avoid
+  // trying to import what was just created.
+  PersistentHistogramAllocator::Reference last_created() {
+    return subtle::NoBarrier_Load(&last_created_);
+  }
+
+  // Gets the next histogram in persistent data based on iterator while
+  // ignoring a particular reference if it is found.
+  std::unique_ptr<HistogramBase> GetNextHistogramWithIgnore(Iterator* iter,
+                                                            Reference ignore);
+
  private:
   // Enumerate possible creation results for reporting.
   enum CreateHistogramResultType {
@@ -180,33 +328,117 @@
     CREATE_HISTOGRAM_MAX
   };
 
-  // The structure used to hold histogram data in persistent memory. It is
-  // defined and used entirely within the .cc file.
-  struct PersistentHistogramData;
-
-  // Get the next histogram in persistent data based on iterator while
-  // ignoring a particular reference if it is found.
-  scoped_ptr<HistogramBase> GetNextHistogramWithIgnore(
-      Iterator* iter,
-      Reference ignore);
-
   // Create a histogram based on saved (persistent) information about it.
-  scoped_ptr<HistogramBase> CreateHistogram(
+  std::unique_ptr<HistogramBase> CreateHistogram(
       PersistentHistogramData* histogram_data_ptr);
 
   // Record the result of a histogram creation.
   static void RecordCreateHistogramResult(CreateHistogramResultType result);
 
   // The memory allocator that provides the actual histogram storage.
-  scoped_ptr<PersistentMemoryAllocator> memory_allocator_;
+  std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
+
+  // The data-manager used to improve performance of sparse histograms.
+  PersistentSparseHistogramDataManager sparse_histogram_data_manager_;
 
   // A reference to the last-created histogram in the allocator, used to avoid
   // trying to import what was just created.
-  subtle::AtomicWord last_created_ = 0;
+  // TODO(bcwhite): Change this to std::atomic<PMA::Reference> when available.
+  subtle::Atomic32 last_created_ = 0;
 
   DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocator);
 };
 
+
+// A special case of the PersistentHistogramAllocator that operates on a
+// global scale, collecting histograms created through standard macros and
+// the FactoryGet() method.
+class BASE_EXPORT GlobalHistogramAllocator
+    : public PersistentHistogramAllocator {
+ public:
+  ~GlobalHistogramAllocator() override;
+
+  // Create a global allocator using the passed-in memory |base|, |size|, and
+  // other parameters. Ownership of the memory segment remains with the caller.
+  static void CreateWithPersistentMemory(void* base,
+                                         size_t size,
+                                         size_t page_size,
+                                         uint64_t id,
+                                         StringPiece name);
+
+  // Create a global allocator using an internal block of memory of the
+  // specified |size| taken from the heap.
+  static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
+
+  // Create a global allocator using a block of shared |memory| of the
+  // specified |size|. The allocator takes ownership of the shared memory
+  // and releases it upon destruction, though the memory will continue to
+  // live if other processes have access to it.
+  static void CreateWithSharedMemory(std::unique_ptr<SharedMemory> memory,
+                                     size_t size,
+                                     uint64_t id,
+                                     StringPiece name);
+
+  // Create a global allocator using a block of shared memory accessed
+  // through the given |handle| and |size|. The allocator takes ownership
+  // of the handle and closes it upon destruction, though the memory will
+  // continue to live if other processes have access to it.
+  static void CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
+                                           size_t size);
+
+  // Sets a GlobalHistogramAllocator for globally storing histograms in
+  // a space that can be persisted or shared between processes. There is only
+  // ever one allocator for all such histograms created by a single process.
+  // This takes ownership of the object and should be called as soon as
+  // possible during startup to capture as many histograms as possible and
+  // while operating single-threaded so there are no race-conditions.
+  static void Set(std::unique_ptr<GlobalHistogramAllocator> allocator);
+
+  // Gets a pointer to the global histogram allocator. Returns null if none
+  // exists.
+  static GlobalHistogramAllocator* Get();
+
+  // This access to the persistent allocator is only for testing; it extracts
+  // the current allocator completely. This allows easy creation of histograms
+  // within persistent memory segments which can then be extracted and used in
+  // other ways.
+  static std::unique_ptr<GlobalHistogramAllocator> ReleaseForTesting();
+
+  // Stores a pathname to which the contents of this allocator should be saved
+  // in order to persist the data for a later use.
+  void SetPersistentLocation(const FilePath& location);
+
+  // Writes the internal data to a previously set location. This is generally
+  // called when a process is exiting from a section of code that may not know
+  // the filesystem. The data is written in an atomic manner. The return value
+  // indicates success.
+  bool WriteToPersistentLocation();
+
+ private:
+  friend class StatisticsRecorder;
+
+  // Creates a new global histogram allocator.
+  explicit GlobalHistogramAllocator(
+      std::unique_ptr<PersistentMemoryAllocator> memory);
+
+  // Import new histograms from the global histogram allocator. It's possible
+  // for other processes to create histograms in the active memory segment;
+  // this adds those to the internal list of known histograms to avoid creating
+  // duplicates that would have to be merged during reporting. Every call to
+  // this method resumes from the last entry it saw; it costs nothing if
+  // nothing new has been added.
+  void ImportHistogramsToStatisticsRecorder();
+
+  // Import always continues from where it left off, making use of a single
+  // iterator to continue the work.
+  Iterator import_iterator_;
+
+  // The location to which the data should be persisted.
+  FilePath persistent_location_;
+
+  DISALLOW_COPY_AND_ASSIGN(GlobalHistogramAllocator);
+};
+
 }  // namespace base
 
 #endif  // BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
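
As a usage note (not part of the diff), the Acquire()/Release() contract on PersistentSampleMapRecords described above might look roughly like this; the sample-map id and the |kUser| token are illustrative assumptions:

    #include "base/metrics/persistent_histogram_allocator.h"
    #include "base/metrics/persistent_memory_allocator.h"

    void VisitSampleRecordsExample(base::PersistentHistogramAllocator* allocator) {
      const uint64_t kSampleMapId = 0x1234;     // Typically a hash of the name.
      const void* const kUser = &kSampleMapId;  // Any stable pointer identifies the user.

      // UseSampleMapRecords() performs an implicit Acquire() for |kUser|.
      base::PersistentSampleMapRecords* records =
          allocator->UseSampleMapRecords(kSampleMapId, kUser);

      // Walk the records already known for this sample map. The layout of the
      // referenced data belongs to PersistentSampleMap.
      base::PersistentMemoryAllocator::Reference ref;
      while ((ref = records->GetNext()) != 0) {
        // ... resolve |ref| with records->GetAsObject<>() where the record
        // type is visible (inside persistent_sample_map.cc) ...
      }

      // Ownership stays with the allocator; just release the claim.
      records->Release(kUser);
    }
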
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
index c65eade..24a0753 100644
--- a/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -5,6 +5,7 @@
 #include "base/metrics/persistent_histogram_allocator.h"
 
 #include "base/logging.h"
+#include "base/memory/ptr_util.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/persistent_memory_allocator.h"
@@ -24,22 +25,21 @@
   void CreatePersistentHistogramAllocator() {
     allocator_memory_.reset(new char[kAllocatorMemorySize]);
 
-    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+    GlobalHistogramAllocator::ReleaseForTesting();
     memset(allocator_memory_.get(), 0, kAllocatorMemorySize);
-    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
-    PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory(
+    GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
         allocator_memory_.get(), kAllocatorMemorySize, 0, 0,
         "PersistentHistogramAllocatorTest");
-    allocator_ =
-        PersistentHistogramAllocator::GetGlobalAllocator()->memory_allocator();
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
   }
 
   void DestroyPersistentHistogramAllocator() {
     allocator_ = nullptr;
-    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+    GlobalHistogramAllocator::ReleaseForTesting();
   }
 
-  scoped_ptr<char[]> allocator_memory_;
+  std::unique_ptr<char[]> allocator_memory_;
   PersistentMemoryAllocator* allocator_ = nullptr;
 
  private:
@@ -86,41 +86,39 @@
   allocator_->GetMemoryInfo(&meminfo4);
   EXPECT_GT(meminfo3.free, meminfo4.free);
 
-  PersistentMemoryAllocator::Iterator iter;
+  PersistentMemoryAllocator::Iterator iter(allocator_);
   uint32_t type;
-  allocator_->CreateIterator(&iter);
-  EXPECT_NE(0U, allocator_->GetNextIterable(&iter, &type));  // Histogram
-  EXPECT_NE(0U, allocator_->GetNextIterable(&iter, &type));  // LinearHistogram
-  EXPECT_NE(0U, allocator_->GetNextIterable(&iter, &type));  // BooleanHistogram
-  EXPECT_NE(0U, allocator_->GetNextIterable(&iter, &type));  // CustomHistogram
-  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_NE(0U, iter.GetNext(&type));  // Histogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // LinearHistogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // BooleanHistogram
+  EXPECT_NE(0U, iter.GetNext(&type));  // CustomHistogram
+  EXPECT_EQ(0U, iter.GetNext(&type));
 
   // Create a second allocator and have it access the memory of the first.
-  scoped_ptr<HistogramBase> recovered;
+  std::unique_ptr<HistogramBase> recovered;
   PersistentHistogramAllocator recovery(
-      make_scoped_ptr(new PersistentMemoryAllocator(
+      WrapUnique(new PersistentMemoryAllocator(
           allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
-  PersistentHistogramAllocator::Iterator histogram_iter;
-  recovery.CreateIterator(&histogram_iter);
+  PersistentHistogramAllocator::Iterator histogram_iter(&recovery);
 
-  recovered = recovery.GetNextHistogram(&histogram_iter);
-  ASSERT_TRUE(recovered.get());
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
   recovered->CheckName("TestHistogram");
 
-  recovered = recovery.GetNextHistogram(&histogram_iter);
-  ASSERT_TRUE(recovered.get());
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
   recovered->CheckName("TestLinearHistogram");
 
-  recovered = recovery.GetNextHistogram(&histogram_iter);
-  ASSERT_TRUE(recovered.get());
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
   recovered->CheckName("TestBooleanHistogram");
 
-  recovered = recovery.GetNextHistogram(&histogram_iter);
-  ASSERT_TRUE(recovered.get());
+  recovered = histogram_iter.GetNext();
+  ASSERT_TRUE(recovered);
   recovered->CheckName("TestCustomHistogram");
 
-  recovered = recovery.GetNextHistogram(&histogram_iter);
-  EXPECT_FALSE(recovered.get());
+  recovered = histogram_iter.GetNext();
+  EXPECT_FALSE(recovered);
 }
 
 }  // namespace base
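
Outside of tests (which, as above, use CreateWithPersistentMemory() on a local buffer), production code would typically install and reclaim the global allocator along these lines; the size, id, and name are example values only:

    #include <memory>

    #include "base/metrics/persistent_histogram_allocator.h"

    void InstallGlobalAllocatorExample() {
      // Must run early and single-threaded so macro-created histograms land
      // in persistent memory from the start.
      base::GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
      base::GlobalHistogramAllocator::CreateWithLocalMemory(
          512 << 10 /* bytes */, 0 /* id */, "ExampleMetrics");
    }

    void TearDownForTestingExample() {
      // Tests take the allocator back so the next test starts clean.
      std::unique_ptr<base::GlobalHistogramAllocator> allocator =
          base::GlobalHistogramAllocator::ReleaseForTesting();
      // |allocator| and its memory are destroyed here.
    }
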
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index a1a960c..bc873fe 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -45,12 +45,12 @@
 };
 
 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
-  uint32_t loaded_flags = flags->load();
+  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
   return (loaded_flags & flag) != 0;
 }
 
 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
-  uint32_t loaded_flags = flags->load();
+  uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
   for (;;) {
     uint32_t new_flags = (loaded_flags & ~flag) | flag;
     // On failure, the actual "flags" value is stored in loaded_flags.
@@ -116,8 +116,120 @@
 const PersistentMemoryAllocator::Reference
     PersistentMemoryAllocator::kReferenceQueue =
         offsetof(SharedMetadata, queue);
-const PersistentMemoryAllocator::Reference
-    PersistentMemoryAllocator::kReferenceNull = 0;
+
+const base::FilePath::CharType PersistentMemoryAllocator::kFileExtension[] =
+    FILE_PATH_LITERAL(".pma");
+
+
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
+
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator,
+    Reference starting_after)
+    : allocator_(allocator), last_record_(starting_after), record_count_(0) {
+  // Ensure that the starting point is a valid, iterable block (meaning it can
+  // be read and has a non-zero "next" pointer).
+  const volatile BlockHeader* block =
+      allocator_->GetBlock(starting_after, 0, 0, false, false);
+  if (!block || block->next.load(std::memory_order_relaxed) == 0) {
+    NOTREACHED();
+    last_record_.store(kReferenceQueue, std::memory_order_release);
+  }
+}
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
+  // Make a copy of the existing count of found-records, acquiring all changes
+  // made to the allocator, notably "freeptr" (see comment in loop for why
+  // the load of that value cannot be moved above here) that occurred during
+  // any previous runs of this method, including those by parallel threads
+  // that interrupted it. It pairs with the Release at the end of this method.
+  //
+  // Otherwise, if the compiler were to arrange the two loads such that
+  // "count" was fetched _after_ "freeptr" then it would be possible for
+  // this thread to be interrupted between them and other threads perform
+  // multiple allocations, make-iterables, and iterations (with the included
+  // increment of |record_count_|) culminating in the check at the bottom
+  // mistakenly determining that a loop exists. Isn't this stuff fun?
+  uint32_t count = record_count_.load(std::memory_order_acquire);
+
+  Reference last = last_record_.load(std::memory_order_acquire);
+  Reference next;
+  while (true) {
+    const volatile BlockHeader* block =
+        allocator_->GetBlock(last, 0, 0, true, false);
+    if (!block)  // Invalid iterator state.
+      return kReferenceNull;
+
+    // The compiler and CPU can freely reorder all memory accesses on which
+    // there are no dependencies. It could, for example, move the load of
+    // "freeptr" to above this point because there are no explicit dependencies
+    // between it and "next". If it did, however, then another block could
+    // be queued after that but before the following load meaning there is
+    // one more queued block than the future "detect loop by having more
+    // blocks that could fit before freeptr" will allow.
+    //
+    // By "acquiring" the "next" value here, it's synchronized to the enqueue
+    // of the node which in turn is synchronized to the allocation (which sets
+    // freeptr). Thus, the scenario above cannot happen.
+    next = block->next.load(std::memory_order_acquire);
+    if (next == kReferenceQueue)  // No next allocation in queue.
+      return kReferenceNull;
+    block = allocator_->GetBlock(next, 0, 0, false, false);
+    if (!block) {  // Memory is corrupt.
+      allocator_->SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Update the "last_record" pointer to be the reference being returned.
+    // If it fails then another thread has already iterated past it so loop
+    // again. Failing will also load the existing value into "last" so there
+    // is no need to do another such load when the while-loop restarts. A
+    // "strong" compare-exchange is used because failing unnecessarily would
+    // mean repeating some fairly costly validations above.
+    if (last_record_.compare_exchange_strong(last, next)) {
+      *type_return = block->type_id;
+      break;
+    }
+  }
+
+  // Memory corruption could cause a loop in the list, which must be detected
+  // so as not to cause an infinite loop in the caller. This is done by simply
+  // making sure it doesn't iterate more times than the absolute maximum
+  // number of allocations that could have been made. Callers are likely
+  // to loop multiple times before it is detected but at least it stops.
+  const uint32_t freeptr = std::min(
+      allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
+      allocator_->mem_size_);
+  const uint32_t max_records =
+      freeptr / (sizeof(BlockHeader) + kAllocAlignment);
+  if (count > max_records) {
+    allocator_->SetCorrupt();
+    return kReferenceNull;
+  }
+
+  // Increment the count and release the changes made above. It pairs with
+  // the Acquire at the top of this method. Note that this operation is not
+  // strictly synchronized with fetching of the object to return, which would
+  // have to be done inside the loop and is somewhat complicated to achieve.
+  // It does not matter if it falls behind temporarily so long as it never
+  // gets ahead.
+  record_count_.fetch_add(1, std::memory_order_release);
+  return next;
+}
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
+  Reference ref;
+  uint32_t type_found;
+  while ((ref = GetNext(&type_found)) != 0) {
+    if (type_found == type_match)
+      return ref;
+  }
+  return kReferenceNull;
+}
 
 
 // static
@@ -177,13 +289,13 @@
     if (shared_meta()->cookie != 0 ||
         shared_meta()->size != 0 ||
         shared_meta()->version != 0 ||
-        shared_meta()->freeptr.load() != 0 ||
-        shared_meta()->flags.load() != 0 ||
+        shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
+        shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
         shared_meta()->id != 0 ||
         shared_meta()->name != 0 ||
         shared_meta()->tailptr != 0 ||
         shared_meta()->queue.cookie != 0 ||
-        shared_meta()->queue.next.load() != 0 ||
+        shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
         first_block->size != 0 ||
         first_block->cookie != 0 ||
         first_block->type_id != 0 ||
@@ -199,13 +311,14 @@
     shared_meta()->page_size = mem_page_;
     shared_meta()->version = kGlobalVersion;
     shared_meta()->id = id;
-    shared_meta()->freeptr.store(sizeof(SharedMetadata));
+    shared_meta()->freeptr.store(sizeof(SharedMetadata),
+                                 std::memory_order_release);
 
     // Set up the queue of iterable allocations.
     shared_meta()->queue.size = sizeof(BlockHeader);
     shared_meta()->queue.cookie = kBlockCookieQueue;
-    shared_meta()->queue.next.store(kReferenceQueue);
-    shared_meta()->tailptr.store(kReferenceQueue);
+    shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
+    shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
 
     // Allocate space for the name so other processes can learn it.
     if (!name.empty()) {
@@ -218,10 +331,10 @@
   } else {
     if (shared_meta()->size == 0 ||
         shared_meta()->version == 0 ||
-        shared_meta()->freeptr.load() == 0 ||
+        shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
         shared_meta()->tailptr == 0 ||
         shared_meta()->queue.cookie == 0 ||
-        shared_meta()->queue.next.load() == 0) {
+        shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
       SetCorrupt();
     }
     if (!readonly) {
@@ -281,7 +394,8 @@
 }
 
 size_t PersistentMemoryAllocator::used() const {
-  return std::min(shared_meta()->freeptr.load(), mem_size_);
+  return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
+                  mem_size_);
 }
 
 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
@@ -354,7 +468,8 @@
   // the code below but recognize that any failed compare-exchange operation
   // involving it will cause it to be loaded with a more recent value. The
   // code should either exit or restart the loop in that case.
-  /* const */ uint32_t freeptr = shared_meta()->freeptr.load();
+  /* const */ uint32_t freeptr =
+      shared_meta()->freeptr.load(std::memory_order_acquire);
 
   // Allocation is lockless so we do all our calculation and then, if saving
   // indicates a change has occurred since we started, scrap everything and
@@ -424,7 +539,7 @@
     if (block->size != 0 ||
         block->cookie != kBlockCookieFree ||
         block->type_id != 0 ||
-        block->next.load() != 0) {
+        block->next.load(std::memory_order_relaxed) != 0) {
       SetCorrupt();
       return kReferenceNull;
     }
@@ -437,8 +552,9 @@
 }
 
 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
-  uint32_t remaining = std::max(mem_size_ - shared_meta()->freeptr.load(),
-                                (uint32_t)sizeof(BlockHeader));
+  uint32_t remaining = std::max(
+      mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
+      (uint32_t)sizeof(BlockHeader));
   meminfo->total = mem_size_;
   meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
 }
@@ -501,68 +617,6 @@
   }
 }
 
-void PersistentMemoryAllocator::CreateIterator(Iterator* state,
-                                               Reference starting_after) const {
-  if (starting_after) {
-    // Ensure that the starting point is a valid, iterable block.
-    const volatile BlockHeader* block =
-        GetBlock(starting_after, 0, 0, false, false);
-    if (!block || !block->next.load()) {
-      NOTREACHED();
-      starting_after = kReferenceQueue;
-    }
-  } else {
-    // A zero beginning is really the Queue reference.
-    starting_after = kReferenceQueue;
-  }
-
-  state->last = starting_after;
-  state->niter = 0;
-}
-
-PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetNextIterable(
-    Iterator* state,
-    uint32_t* type_id) const {
-  const volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false);
-  if (!block)  // invalid iterator state
-    return kReferenceNull;
-
-  // The compiler and CPU can freely reorder all memory accesses on which
-  // there are no dependencies. It could, for example, move the load of
-  // "freeptr" above this point because there are no explicit dependencies
-  // between it and "next". If it did, however, then another block could
-  // be queued after that but before the following load meaning there is
-  // one more queued block than the future "detect loop by having more
-  // blocks that could fit before freeptr" will allow.
-  //
-  // By "acquiring" the "next" value here, it's synchronized to the enqueue
-  // of the node which in turn is synchronized to the allocation (which sets
-  // freeptr). Thus, the scenario above cannot happen.
-  uint32_t next = block->next.load(std::memory_order_acquire);
-  block = GetBlock(next, 0, 0, false, false);
-  if (!block)  // no next allocation in queue
-    return kReferenceNull;
-
-  // Memory corruption could cause a loop in the list. We need to detect
-  // that so as to not cause an infinite loop in the caller. We do this
-  // simply by making sure we don't iterate more than the absolute maximum
-  // number of allocations that could have been made. Callers are likely
-  // to loop multiple times before it is detected but at least it stops.
-  uint32_t freeptr = std::min(
-      shared_meta()->freeptr.load(std::memory_order_acquire),
-      mem_size_);
-  if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) {
-    SetCorrupt();
-    return kReferenceNull;
-  }
-
-  state->last = next;
-  state->niter++;
-  *type_id = block->type_id;
-
-  return next;
-}
-
 // The "corrupted" state is held both locally and globally (shared). The
 // shared flag can't be trusted since a malicious actor could overwrite it.
 // Because corruption can be detected during read-only operations such as
@@ -571,7 +625,8 @@
 // maybe even the shared flag if the underlying data isn't actually read-only.
 void PersistentMemoryAllocator::SetCorrupt() const {
   LOG(ERROR) << "Corruption detected in shared-memory segment.";
-  const_cast<std::atomic<bool>*>(&corrupt_)->store(true);
+  const_cast<std::atomic<bool>*>(&corrupt_)->store(true,
+                                                   std::memory_order_relaxed);
   if (!readonly_) {
     SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
             kFlagCorrupt);
@@ -579,7 +634,8 @@
 }
 
 bool PersistentMemoryAllocator::IsCorrupt() const {
-  if (corrupt_.load() || CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
+  if (corrupt_.load(std::memory_order_relaxed) ||
+      CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
     SetCorrupt();  // Make sure all indicators are set.
     return true;
   }
@@ -610,13 +666,16 @@
 
   // Validation of referenced block-header.
   if (!free_ok) {
-    uint32_t freeptr = shared_meta()->freeptr.load();
+    uint32_t freeptr = std::min(
+        shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
     if (ref + size > freeptr)
       return nullptr;
     const volatile BlockHeader* const block =
         reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
     if (block->size < size)
       return nullptr;
+    if (ref + block->size > freeptr)
+      return nullptr;
     if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
       return nullptr;
     if (type_id != 0 && block->type_id != type_id)
@@ -668,12 +727,16 @@
 //----- SharedPersistentMemoryAllocator ----------------------------------------
 
 SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
-    scoped_ptr<SharedMemory> memory,
+    std::unique_ptr<SharedMemory> memory,
     uint64_t id,
     base::StringPiece name,
     bool read_only)
     : PersistentMemoryAllocator(static_cast<uint8_t*>(memory->memory()),
-                                memory->mapped_size(), 0, id, name, read_only),
+                                memory->mapped_size(),
+                                0,
+                                id,
+                                name,
+                                read_only),
       shared_memory_(std::move(memory)) {}
 
 SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
@@ -688,11 +751,15 @@
 //----- FilePersistentMemoryAllocator ------------------------------------------
 
 FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
-    scoped_ptr<MemoryMappedFile> file,
+    std::unique_ptr<MemoryMappedFile> file,
     uint64_t id,
     base::StringPiece name)
     : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
-                                file->length(), 0, id, name, true),
+                                file->length(),
+                                0,
+                                id,
+                                name,
+                                true),
       mapped_file_(std::move(file)) {}
 
 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
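
To make the acquire/release reasoning in Iterator::GetNext() above easier to follow, here is a stand-alone model of the same pattern. It uses made-up names and plain std::atomic members standing in for the allocator state; it is an illustration, not code from the allocator.

    #include <atomic>
    #include <cstdint>

    struct IteratorModel {
      std::atomic<uint32_t> last_record{0};
      std::atomic<uint32_t> record_count{0};

      // |next_of| stands in for the acquire-load of BlockHeader::next.
      uint32_t GetNext(uint32_t (*next_of)(uint32_t)) {
        // Acquire pairs with the fetch_add(release) below so the count read
        // here can fall behind the records consumed but never get ahead.
        uint32_t count = record_count.load(std::memory_order_acquire);
        uint32_t last = last_record.load(std::memory_order_acquire);
        uint32_t next;
        do {
          next = next_of(last);
          if (next == 0)
            return 0;  // Nothing more in the queue.
          // On CAS failure another thread advanced the iterator; |last| is
          // reloaded automatically and the loop simply retries.
        } while (!last_record.compare_exchange_strong(last, next));

        // The real code bounds |count| by how many blocks could fit below
        // "freeptr" in order to detect corruption-induced loops.
        if (count > (1u << 20))
          return 0;

        record_count.fetch_add(1, std::memory_order_release);
        return next;
      }
    };
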
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index f75b1c0..56edd2c 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -6,13 +6,15 @@
 #define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
 
 #include <stdint.h>
+
 #include <atomic>
+#include <memory>
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
+#include "base/files/file_path.h"
 #include "base/gtest_prod_util.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 
 namespace base {
@@ -49,22 +51,66 @@
  public:
   typedef uint32_t Reference;
 
-  // Internal state information when iterating over memory allocations.
-  class Iterator {
+  // Iterator for going through all iterable memory records in an allocator.
+  // Like the allocator itself, iterators are lock-free and thread-secure.
+  // That means that multiple threads can share an iterator and the same
+  // reference will not be returned twice.
+  //
+  // Iteration, in general, is tolerant of corrupted memory. It will return
+  // what it can and stop only when corruption forces it to. Bad corruption
+  // could cause the same object to be returned many times but it will
+  // eventually quit.
+  class BASE_EXPORT Iterator {
    public:
-    Iterator() : last(0) {}
+    // Constructs an iterator on a given |allocator|, starting at the beginning.
+    // The allocator must live beyond the lifetime of the iterator. This class
+    // has read-only access to the allocator (hence "const") but the returned
+    // references can be used on a read/write version, too.
+    explicit Iterator(const PersistentMemoryAllocator* allocator);
 
-    bool operator==(const Iterator& rhs) const { return last == rhs.last; }
-    bool operator!=(const Iterator& rhs) const { return last != rhs.last; }
+    // As above but resuming from the |starting_after| reference. The first call
+    // to GetNext() will return the next object found after that reference. The
+    // reference must be to an "iterable" object; references to non-iterable
+    // objects (those that never had MakeIterable() called for them) will cause
+    // a run-time error.
+    Iterator(const PersistentMemoryAllocator* allocator,
+             Reference starting_after);
 
-    void clear() { last = 0; }
-    bool is_clear() const { return last == 0; }
+    // Gets the next iterable, storing that type in |type_return|. The actual
+    // return value is a reference to the allocation inside the allocator or
+    // zero if there are no more. GetNext() may still be called again at a
+    // later time to retrieve any new allocations that have been added.
+    Reference GetNext(uint32_t* type_return);
+
+    // Similar to above but gets the next iterable of a specific |type_match|.
+    // This should not be mixed with calls to GetNext() because any allocations
+    // skipped here due to a type mismatch will never be returned by later
+    // calls to GetNext(), meaning it's possible to completely miss entries.
+    Reference GetNextOfType(uint32_t type_match);
+
+    // Converts references to objects. This is a convenience method so that
+    // users of the iterator don't need to also have their own pointer to the
+    // allocator over which the iterator runs in order to retrieve objects.
+    // Because the iterator is not read/write, only "const" objects can be
+    // fetched. Non-const objects can be fetched using the reference on a
+    // non-const (external) pointer to the same allocator (or use const_cast
+    // to remove the qualifier).
+    template <typename T>
+    const T* GetAsObject(Reference ref, uint32_t type_id) const {
+      return allocator_->GetAsObject<T>(ref, type_id);
+    }
 
    private:
-    friend class PersistentMemoryAllocator;
+    // Weak-pointer to memory allocator being iterated over.
+    const PersistentMemoryAllocator* allocator_;
 
-    Reference last;
-    uint32_t niter;
+    // The last record that was returned.
+    std::atomic<Reference> last_record_;
+
+    // The number of records found; used for detecting loops.
+    std::atomic<uint32_t> record_count_;
+
+    DISALLOW_COPY_AND_ASSIGN(Iterator);
   };
 
   // Returned information about the internal state of the heap.
@@ -73,10 +119,18 @@
     size_t free;
   };
 
+  enum : Reference {
+    kReferenceNull = 0  // A common "null" reference value.
+  };
+
   enum : uint32_t {
     kTypeIdAny = 0  // Match any type-id inside GetAsObject().
   };
 
+  // This is the standard file extension (suitable for being passed to the
+  // AddExtension() method of base::FilePath) for dumps of persistent memory.
+  static const base::FilePath::CharType kFileExtension[];
+
   // The allocator operates on any arbitrary block of memory. Creation and
   // persisting or sharing of that block with another process is the
   // responsibility of the caller. The allocator needs to know only the
@@ -142,6 +196,7 @@
   // not guarantee consistency. Use with care. Do not write.
   const void* data() const { return const_cast<const char*>(mem_base_); }
   size_t length() const { return mem_size_; }
+  size_t size() const { return mem_size_; }
   size_t used() const;
 
   // Get an object referenced by a |ref|. For safety reasons, the |type_id|
@@ -211,17 +266,6 @@
   // also make the true amount less than what is reported.
   void GetMemoryInfo(MemoryInfo* meminfo) const;
 
-  // Iterating uses a |state| structure (initialized by CreateIterator) and
-  // returns both the reference to the object as well as the |type_id| of
-  // that object. A zero return value indicates there are currently no more
-  // objects to be found but future attempts can be made without having to
-  // reset the iterator to "first". Creating an iterator |starting_after|
-  // a known iterable object allows "resume" from that point with the next
-  // call to GetNextIterable returning the object after it.
-  void CreateIterator(Iterator* state) const { CreateIterator(state, 0); };
-  void CreateIterator(Iterator* state, Reference starting_after) const;
-  Reference GetNextIterable(Iterator* state, uint32_t* type_id) const;
-
   // If there is some indication that the memory has become corrupted,
   // calling this will attempt to prevent further damage by indicating to
   // all processes that something is not as expected.
@@ -250,7 +294,6 @@
   struct BlockHeader;
   static const uint32_t kAllocAlignment;
   static const Reference kReferenceQueue;
-  static const Reference kReferenceNull;
 
   // The shared metadata is always located at the top of the memory segment.
   // These convenience functions eliminate constant casting of the base
@@ -321,8 +364,10 @@
 class BASE_EXPORT SharedPersistentMemoryAllocator
     : public PersistentMemoryAllocator {
  public:
-  SharedPersistentMemoryAllocator(scoped_ptr<SharedMemory> memory, uint64_t id,
-                                  base::StringPiece name, bool read_only);
+  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
+                                  uint64_t id,
+                                  base::StringPiece name,
+                                  bool read_only);
   ~SharedPersistentMemoryAllocator() override;
 
   SharedMemory* shared_memory() { return shared_memory_.get(); }
@@ -334,7 +379,7 @@
   static bool IsSharedMemoryAcceptable(const SharedMemory& memory);
 
  private:
-  scoped_ptr<SharedMemory> shared_memory_;
+  std::unique_ptr<SharedMemory> shared_memory_;
 
   DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
 };
@@ -346,7 +391,8 @@
 class BASE_EXPORT FilePersistentMemoryAllocator
     : public PersistentMemoryAllocator {
  public:
-  FilePersistentMemoryAllocator(scoped_ptr<MemoryMappedFile> file, uint64_t id,
+  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
+                                uint64_t id,
                                 base::StringPiece name);
   ~FilePersistentMemoryAllocator() override;
 
@@ -357,7 +403,7 @@
   static bool IsFileAcceptable(const MemoryMappedFile& file);
 
  private:
-  scoped_ptr<MemoryMappedFile> mapped_file_;
+  std::unique_ptr<MemoryMappedFile> mapped_file_;
 
   DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
 };
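
A short sketch of typical use of the reworked Iterator declared above; "MyRecord" and the type-id value 42 are placeholder assumptions, not types defined by this change.

    #include "base/metrics/persistent_memory_allocator.h"

    struct MyRecord {
      uint32_t value;
    };

    void DumpIterablesExample(const base::PersistentMemoryAllocator* allocator) {
      // Walk every iterable allocation, regardless of type.
      base::PersistentMemoryAllocator::Iterator iter(allocator);
      uint32_t type;
      base::PersistentMemoryAllocator::Reference ref;
      while ((ref = iter.GetNext(&type)) != 0) {
        // ... inspect |ref| and |type| ...
      }

      // Use a separate iterator when filtering by type; GetNextOfType() skips
      // (and never revisits) anything that doesn't match, so don't mix it with
      // GetNext() on the same iterator.
      base::PersistentMemoryAllocator::Iterator typed_iter(allocator);
      while ((ref = typed_iter.GetNextOfType(42)) != 0) {
        const MyRecord* record = typed_iter.GetAsObject<MyRecord>(ref, 42);
        if (!record)
          break;  // Size/type check failed; the memory may be corrupt.
      }
    }
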
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index c79d0c1..70e1392 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -4,15 +4,18 @@
 
 #include "base/metrics/persistent_memory_allocator.h"
 
+#include <memory>
+
 #include "base/files/file.h"
 #include "base/files/file_util.h"
 #include "base/files/memory_mapped_file.h"
 #include "base/files/scoped_temp_dir.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/shared_memory.h"
 #include "base/metrics/histogram.h"
 #include "base/rand_util.h"
 #include "base/strings/safe_sprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
 #include "base/threading/simple_thread.h"
 #include "testing/gmock/include/gmock/gmock.h"
 
@@ -68,19 +71,18 @@
   }
 
   unsigned CountIterables() {
-    PersistentMemoryAllocator::Iterator iter;
+    PersistentMemoryAllocator::Iterator iter(allocator_.get());
     uint32_t type;
     unsigned count = 0;
-    for (allocator_->CreateIterator(&iter);
-         allocator_->GetNextIterable(&iter, &type) != 0;) {
-      count++;
+    while (iter.GetNext(&type) != 0) {
+      ++count;
     }
     return count;
   }
 
  protected:
-  scoped_ptr<char[]> mem_segment_;
-  scoped_ptr<PersistentMemoryAllocator> allocator_;
+  std::unique_ptr<char[]> mem_segment_;
+  std::unique_ptr<PersistentMemoryAllocator> allocator_;
 };
 
 TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
@@ -114,14 +116,13 @@
   EXPECT_GT(meminfo0.free, meminfo1.free);
 
   // Ensure that the test-object can be made iterable.
-  PersistentMemoryAllocator::Iterator iter;
+  PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
   uint32_t type;
-  allocator_->CreateIterator(&iter);
-  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
   allocator_->MakeIterable(block1);
-  EXPECT_EQ(block1, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_EQ(block1, iter1a.GetNext(&type));
   EXPECT_EQ(1U, type);
-  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
 
   // Create second test-object and ensure everything is good and it cannot
   // be confused with test-object of another type.
@@ -139,14 +140,14 @@
 
   // Ensure that second test-object can also be made iterable.
   allocator_->MakeIterable(block2);
-  EXPECT_EQ(block2, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_EQ(block2, iter1a.GetNext(&type));
   EXPECT_EQ(2U, type);
-  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_EQ(0U, iter1a.GetNext(&type));
 
   // Check that iteration can begin after an arbitrary location.
-  allocator_->CreateIterator(&iter, block1);
-  EXPECT_EQ(block2, allocator_->GetNextIterable(&iter, &type));
-  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+  PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
+  EXPECT_EQ(block2, iter1b.GetNext(&type));
+  EXPECT_EQ(0U, iter1b.GetNext(&type));
 
   // Ensure nothing has gone noticeably wrong.
   EXPECT_FALSE(allocator_->IsFull());
@@ -154,15 +155,15 @@
 
   // Check the internal histogram record of used memory.
   allocator_->UpdateTrackingHistograms();
-  scoped_ptr<HistogramSamples> used_samples(
+  std::unique_ptr<HistogramSamples> used_samples(
       allocator_->used_histogram_->SnapshotSamples());
-  EXPECT_TRUE(used_samples.get());
+  EXPECT_TRUE(used_samples);
   EXPECT_EQ(1, used_samples->TotalCount());
 
   // Check the internal histogram record of allocation requests.
-  scoped_ptr<HistogramSamples> allocs_samples(
+  std::unique_ptr<HistogramSamples> allocs_samples(
       allocator_->allocs_histogram_->SnapshotSamples());
-  EXPECT_TRUE(allocs_samples.get());
+  EXPECT_TRUE(allocs_samples);
   EXPECT_EQ(2, allocs_samples->TotalCount());
   EXPECT_EQ(0, allocs_samples->GetCount(0));
   EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject1)));
@@ -182,38 +183,42 @@
   EXPECT_EQ(2U, allocator_->GetType(block2));
 
   // Create second allocator (read/write) using the same memory segment.
-  scoped_ptr<PersistentMemoryAllocator> allocator2(
-      new PersistentMemoryAllocator(
-          mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE, 0, "",
-          false));
+  std::unique_ptr<PersistentMemoryAllocator> allocator2(
+      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
+                                    TEST_MEMORY_PAGE, 0, "", false));
   EXPECT_EQ(TEST_ID, allocator2->Id());
   EXPECT_FALSE(allocator2->used_histogram_);
   EXPECT_FALSE(allocator2->allocs_histogram_);
   EXPECT_NE(allocator2->allocs_histogram_, allocator_->allocs_histogram_);
 
   // Ensure that iteration and access through second allocator works.
-  allocator2->CreateIterator(&iter);
-  EXPECT_EQ(block1, allocator2->GetNextIterable(&iter, &type));
-  EXPECT_EQ(block2, allocator2->GetNextIterable(&iter, &type));
-  EXPECT_EQ(0U, allocator2->GetNextIterable(&iter, &type));
+  PersistentMemoryAllocator::Iterator iter2(allocator2.get());
+  EXPECT_EQ(block1, iter2.GetNext(&type));
+  EXPECT_EQ(block2, iter2.GetNext(&type));
+  EXPECT_EQ(0U, iter2.GetNext(&type));
   EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
   EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));
 
   // Create a third allocator (read-only) using the same memory segment.
-  scoped_ptr<const PersistentMemoryAllocator> allocator3(
-      new PersistentMemoryAllocator(
-          mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE, 0, "", true));
+  std::unique_ptr<const PersistentMemoryAllocator> allocator3(
+      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
+                                    TEST_MEMORY_PAGE, 0, "", true));
   EXPECT_EQ(TEST_ID, allocator3->Id());
   EXPECT_FALSE(allocator3->used_histogram_);
   EXPECT_FALSE(allocator3->allocs_histogram_);
 
   // Ensure that iteration and access through third allocator works.
-  allocator3->CreateIterator(&iter);
-  EXPECT_EQ(block1, allocator3->GetNextIterable(&iter, &type));
-  EXPECT_EQ(block2, allocator3->GetNextIterable(&iter, &type));
-  EXPECT_EQ(0U, allocator3->GetNextIterable(&iter, &type));
+  PersistentMemoryAllocator::Iterator iter3(allocator3.get());
+  EXPECT_EQ(block1, iter3.GetNext(&type));
+  EXPECT_EQ(block2, iter3.GetNext(&type));
+  EXPECT_EQ(0U, iter3.GetNext(&type));
   EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
   EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));
+
+  // Ensure that GetNextOfType works.
+  PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
+  EXPECT_EQ(block2, iter1c.GetNextOfType(2));
+  EXPECT_EQ(0U, iter1c.GetNextOfType(2));
 }
 
 TEST_F(PersistentMemoryAllocatorTest, PageTest) {
@@ -305,6 +310,103 @@
             t5.iterable());
 }
 
+// A simple thread that counts objects by iterating through an allocator.
+class CounterThread : public SimpleThread {
+ public:
+  CounterThread(const std::string& name,
+                PersistentMemoryAllocator::Iterator* iterator,
+                Lock* lock,
+                ConditionVariable* condition)
+      : SimpleThread(name, Options()),
+        iterator_(iterator),
+        lock_(lock),
+        condition_(condition),
+        count_(0) {}
+
+  void Run() override {
+    // Wait so all threads can start at approximately the same time.
+    // Best performance comes from releasing a single worker which then
+    // releases the next, etc., etc.
+    {
+      AutoLock autolock(*lock_);
+      condition_->Wait();
+      condition_->Signal();
+    }
+
+    uint32_t type;
+    while (iterator_->GetNext(&type) != 0) {
+      ++count_;
+    }
+  }
+
+  unsigned count() { return count_; }
+
+ private:
+  PersistentMemoryAllocator::Iterator* iterator_;
+  Lock* lock_;
+  ConditionVariable* condition_;
+  unsigned count_;
+};
+
+// Ensure that parallel iteration returns the same number of objects as
+// single-threaded iteration.
+TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
+  // Fill the memory segment with random allocations.
+  unsigned iterable_count = 0;
+  for (;;) {
+    uint32_t size = RandInt(1, 99);
+    uint32_t type = RandInt(100, 999);
+    Reference block = allocator_->Allocate(size, type);
+    if (!block)
+      break;
+    allocator_->MakeIterable(block);
+    ++iterable_count;
+  }
+  EXPECT_FALSE(allocator_->IsCorrupt());
+  EXPECT_TRUE(allocator_->IsFull());
+  EXPECT_EQ(iterable_count, CountIterables());
+
+  PersistentMemoryAllocator::Iterator iter(allocator_.get());
+  Lock lock;
+  ConditionVariable condition(&lock);
+
+  CounterThread t1("t1", &iter, &lock, &condition);
+  CounterThread t2("t2", &iter, &lock, &condition);
+  CounterThread t3("t3", &iter, &lock, &condition);
+  CounterThread t4("t4", &iter, &lock, &condition);
+  CounterThread t5("t5", &iter, &lock, &condition);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  // This will release all the waiting threads.
+  condition.Signal();
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  EXPECT_EQ(iterable_count,
+            t1.count() + t2.count() + t3.count() + t4.count() + t5.count());
+
+#if 0
+  // These checks ensure that the threads don't run sequentially. They
+  // shouldn't be enabled in general because sequential execution can happen
+  // simply by chance, making the test flaky, but they are useful during
+  // development to verify that the test is working correctly.
+  EXPECT_NE(iterable_count, t1.count());
+  EXPECT_NE(iterable_count, t2.count());
+  EXPECT_NE(iterable_count, t3.count());
+  EXPECT_NE(iterable_count, t4.count());
+  EXPECT_NE(iterable_count, t5.count());
+#endif
+}
+
 // This test doesn't verify anything other than it doesn't crash. Its goal
 // is to find coding errors that aren't otherwise tested for, much like a
 // "fuzzer" would.
@@ -396,7 +498,7 @@
   PersistentMemoryAllocator::MemoryInfo meminfo1;
   Reference r123, r456, r789;
   {
-    scoped_ptr<SharedMemory> shmem1(new SharedMemory());
+    std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
     ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
     SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
                                           false);
@@ -417,8 +519,8 @@
   }
 
   // Read-only test.
-  scoped_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle,
-                                                   /*readonly=*/true));
+  std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle,
+                                                        /*readonly=*/true));
   ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
 
   SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
@@ -427,12 +529,11 @@
   EXPECT_FALSE(shalloc2.IsFull());
   EXPECT_FALSE(shalloc2.IsCorrupt());
 
-  PersistentMemoryAllocator::Iterator iter2;
+  PersistentMemoryAllocator::Iterator iter2(&shalloc2);
   uint32_t type;
-  shalloc2.CreateIterator(&iter2);
-  EXPECT_EQ(r123, shalloc2.GetNextIterable(&iter2, &type));
-  EXPECT_EQ(r789, shalloc2.GetNextIterable(&iter2, &type));
-  EXPECT_EQ(0U, shalloc2.GetNextIterable(&iter2, &type));
+  EXPECT_EQ(r123, iter2.GetNext(&type));
+  EXPECT_EQ(r789, iter2.GetNext(&type));
+  EXPECT_EQ(0U, iter2.GetNext(&type));
 
   EXPECT_EQ(123U, shalloc2.GetType(r123));
   EXPECT_EQ(654U, shalloc2.GetType(r456));
@@ -444,8 +545,8 @@
   EXPECT_EQ(meminfo1.free, meminfo2.free);
 
   // Read/write test.
-  scoped_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle,
-                                                   /*readonly=*/false));
+  std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle,
+                                                        /*readonly=*/false));
   ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
 
   SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
@@ -454,11 +555,10 @@
   EXPECT_FALSE(shalloc3.IsFull());
   EXPECT_FALSE(shalloc3.IsCorrupt());
 
-  PersistentMemoryAllocator::Iterator iter3;
-  shalloc3.CreateIterator(&iter3);
-  EXPECT_EQ(r123, shalloc3.GetNextIterable(&iter3, &type));
-  EXPECT_EQ(r789, shalloc3.GetNextIterable(&iter3, &type));
-  EXPECT_EQ(0U, shalloc3.GetNextIterable(&iter3, &type));
+  PersistentMemoryAllocator::Iterator iter3(&shalloc3);
+  EXPECT_EQ(r123, iter3.GetNext(&type));
+  EXPECT_EQ(r789, iter3.GetNext(&type));
+  EXPECT_EQ(0U, iter3.GetNext(&type));
 
   EXPECT_EQ(123U, shalloc3.GetType(r123));
   EXPECT_EQ(654U, shalloc3.GetType(r456));
@@ -473,7 +573,7 @@
   Reference obj = shalloc3.Allocate(42, 42);
   ASSERT_TRUE(obj);
   shalloc3.MakeIterable(obj);
-  EXPECT_EQ(obj, shalloc2.GetNextIterable(&iter2, &type));
+  EXPECT_EQ(obj, iter2.GetNext(&type));
   EXPECT_EQ(42U, type);
 }
 
@@ -505,7 +605,7 @@
     writer.Write(0, (const char*)local.data(), local.used());
   }
 
-  scoped_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
   mmfile->Initialize(file_path);
   EXPECT_TRUE(mmfile->IsValid());
   const size_t mmlength = mmfile->length();
@@ -517,12 +617,11 @@
   EXPECT_FALSE(file.IsFull());
   EXPECT_FALSE(file.IsCorrupt());
 
-  PersistentMemoryAllocator::Iterator iter;
+  PersistentMemoryAllocator::Iterator iter(&file);
   uint32_t type;
-  file.CreateIterator(&iter);
-  EXPECT_EQ(r123, file.GetNextIterable(&iter, &type));
-  EXPECT_EQ(r789, file.GetNextIterable(&iter, &type));
-  EXPECT_EQ(0U, file.GetNextIterable(&iter, &type));
+  EXPECT_EQ(r123, iter.GetNext(&type));
+  EXPECT_EQ(r789, iter.GetNext(&type));
+  EXPECT_EQ(0U, iter.GetNext(&type));
 
   EXPECT_EQ(123U, file.GetType(r123));
   EXPECT_EQ(654U, file.GetType(r456));
@@ -542,13 +641,13 @@
   FilePath file_path_base = temp_dir.path().AppendASCII("persistent_memory_");
 
   LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
-  local.Allocate(1, 1);
-  local.Allocate(11, 11);
+  local.MakeIterable(local.Allocate(1, 1));
+  local.MakeIterable(local.Allocate(11, 11));
   const size_t minsize = local.used();
-  scoped_ptr<char[]> garbage(new char[minsize]);
+  std::unique_ptr<char[]> garbage(new char[minsize]);
   RandBytes(garbage.get(), minsize);
 
-  scoped_ptr<MemoryMappedFile> mmfile;
+  std::unique_ptr<MemoryMappedFile> mmfile;
   char filename[100];
   for (size_t filesize = minsize; filesize > 0; --filesize) {
     strings::SafeSPrintf(filename, "memory_%d_A", filesize);
@@ -565,15 +664,14 @@
     mmfile->Initialize(file_path);
     EXPECT_EQ(filesize, mmfile->length());
     if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile)) {
-      // Make sure construction doesn't crash.
+      // Make sure construction doesn't crash. It will, however, cause
+      // error messages warning about a corrupted memory segment.
       FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");
       // Also make sure that iteration doesn't crash.
-      PersistentMemoryAllocator::Iterator iter;
-      allocator.CreateIterator(&iter);
-      for (;;) {
-        Reference ref = allocator.GetNextIterable(&iter, 0);
-        if (!ref)
-          break;
+      PersistentMemoryAllocator::Iterator iter(&allocator);
+      uint32_t type_id;
+      Reference ref;
+      while ((ref = iter.GetNext(&type_id)) != 0) {
         const char* data = allocator.GetAsObject<char>(ref, 0);
         uint32_t type = allocator.GetType(ref);
         size_t size = allocator.GetAllocSize(ref);
@@ -581,9 +679,9 @@
         (void)data;
         (void)type;
         (void)size;
-        // Ensure that corruption-detected flag gets properly set.
-        EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
       }
+      // Ensure that short files are detected as corrupt and full files are not.
+      EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
     } else {
       // For filesize >= minsize, the file must be acceptable. This
       // else clause (file-not-acceptable) should be reached only if
@@ -605,7 +703,8 @@
     mmfile->Initialize(file_path);
     EXPECT_EQ(filesize, mmfile->length());
     if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile)) {
-      // Just need to make sure it doesn't crash.
+      // Make sure construction doesn't crash. It will, however, cause
+      // error messages warning about a corrupted memory segment.
       FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");
       EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
     } else {
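
For context, the migration in this test file replaces the old CreateIterator()/GetNextIterable() pair with a dedicated Iterator object that is bound to an allocator at construction. A minimal sketch of the new idiom, assuming an already-constructed allocator and only the persistent_memory_allocator.h API visible in this diff:

    #include <stdint.h>

    #include "base/metrics/persistent_memory_allocator.h"

    void WalkIterables(base::PersistentMemoryAllocator* allocator) {
      // Illustrative only: walk every iterable block, regardless of type.
      base::PersistentMemoryAllocator::Iterator iter(allocator);
      uint32_t type_id;
      base::PersistentMemoryAllocator::Reference ref;
      while ((ref = iter.GetNext(&type_id)) != 0) {
        // Each reference can be resolved to a typed object, as the tests do
        // with allocator->GetAsObject<TestObject1>(ref, 1).
      }

      // Blocks of a single type can also be walked directly.
      base::PersistentMemoryAllocator::Iterator typed_iter(allocator);
      while (typed_iter.GetNextOfType(2) != 0) {
        // Only blocks allocated with type id 2 are returned here.
      }
    }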
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
index 014a865..15f83cd 100644
--- a/base/metrics/persistent_sample_map.cc
+++ b/base/metrics/persistent_sample_map.cc
@@ -5,6 +5,8 @@
 #include "base/metrics/persistent_sample_map.h"
 
 #include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_histogram_allocator.h"
 #include "base/stl_util.h"
 
 namespace base {
@@ -92,23 +94,15 @@
 
 PersistentSampleMap::PersistentSampleMap(
     uint64_t id,
-    PersistentMemoryAllocator* allocator,
+    PersistentHistogramAllocator* allocator,
     Metadata* meta)
-    : HistogramSamples(id, meta),
-      allocator_(allocator) {
-  // This is created once but will continue to return new iterables even when
-  // it has previously reached the end.
-  allocator->CreateIterator(&sample_iter_);
+    : HistogramSamples(id, meta), allocator_(allocator) {}
 
-  // Load all existing samples during construction. It's no worse to do it
-  // here than at some point in the future and could be better if construction
-  // takes place on some background thread. New samples could be created at
-  // any time by parallel threads; if so, they'll get loaded when needed.
-  ImportSamples(kAllSamples);
+PersistentSampleMap::~PersistentSampleMap() {
+  if (records_)
+    records_->Release(this);
 }
 
-PersistentSampleMap::~PersistentSampleMap() {}
-
 void PersistentSampleMap::Accumulate(Sample value, Count count) {
   *GetOrCreateSampleCountStorage(value) += count;
   IncreaseSum(static_cast<int64_t>(count) * value);
@@ -126,7 +120,7 @@
 Count PersistentSampleMap::TotalCount() const {
   // Have to override "const" in order to make sure all samples have been
   // loaded before trying to iterate over the map.
-  const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples);
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
 
   Count count = 0;
   for (const auto& entry : sample_counts_) {
@@ -135,11 +129,51 @@
   return count;
 }
 
-scoped_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
+std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
   // Have to override "const" in order to make sure all samples have been
   // loaded before trying to iterate over the map.
-  const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples);
-  return make_scoped_ptr(new PersistentSampleMapIterator(sample_counts_));
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
+  return WrapUnique(new PersistentSampleMapIterator(sample_counts_));
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::GetNextPersistentRecord(
+    PersistentMemoryAllocator::Iterator& iterator,
+    uint64_t* sample_map_id) {
+  PersistentMemoryAllocator::Reference ref =
+      iterator.GetNextOfType(kTypeIdSampleRecord);
+  const SampleRecord* record =
+      iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+  if (!record)
+    return 0;
+
+  *sample_map_id = record->id;
+  return ref;
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::CreatePersistentRecord(
+    PersistentMemoryAllocator* allocator,
+    uint64_t sample_map_id,
+    Sample value) {
+  PersistentMemoryAllocator::Reference ref =
+      allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
+  SampleRecord* record =
+      allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+
+  if (!record) {
+    NOTREACHED() << "full=" << allocator->IsFull()
+                 << ", corrupt=" << allocator->IsCorrupt();
+    return 0;
+  }
+
+  record->id = sample_map_id;
+  record->value = value;
+  record->count = 0;
+  allocator->MakeIterable(ref);
+  return ref;
 }
 
 bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
@@ -159,15 +193,13 @@
 }
 
 Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
-  DCHECK_LE(0, value);
-
   // If |value| is already in the map, just return that.
   auto it = sample_counts_.find(value);
   if (it != sample_counts_.end())
     return it->second;
 
   // Import any new samples from persistent memory looking for the value.
-  return ImportSamples(value);
+  return ImportSamples(value, false);
 }
 
 Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
@@ -176,26 +208,19 @@
   if (count_pointer)
     return count_pointer;
 
-  // Create a new record in persistent memory for the value.
-  PersistentMemoryAllocator::Reference ref =
-      allocator_->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
-  SampleRecord* record =
-      allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
-  if (!record) {
-    // If the allocator was unable to create a record then it is full or
-    // corrupt. Instead, allocate the counter from the heap. This sample will
-    // not be persistent, will not be shared, and will leak but it's better
-    // than crashing.
-    NOTREACHED() << "full=" << allocator_->IsFull()
-                 << ", corrupt=" << allocator_->IsCorrupt();
+  // Create a new record in persistent memory for the value. |records_| will
+  // have been initialized by the GetSampleCountStorage() call above.
+  DCHECK(records_);
+  PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
+  if (!ref) {
+    // If a new record could not be created then the underlying allocator is
+    // full or corrupt. Instead, allocate the counter from the heap. This
+    // sample will not be persistent, will not be shared, and will leak...
+    // but it's better than crashing.
     count_pointer = new Count(0);
     sample_counts_[value] = count_pointer;
     return count_pointer;
   }
-  record->id = id();
-  record->value = value;
-  record->count = 0;  // Should already be zero but don't trust other processes.
-  allocator_->MakeIterable(ref);
 
   // A race condition between two independent processes (i.e. two independent
   // histogram objects sharing the same sample data) could cause two of the
@@ -206,62 +231,59 @@
   // Thread-safety within a process where multiple threads use the same
   // histogram object is delegated to the controlling histogram object which,
   // for sparse histograms, is a lock object.
-  count_pointer = ImportSamples(value);
+  count_pointer = ImportSamples(value, false);
   DCHECK(count_pointer);
   return count_pointer;
 }
 
-Count* PersistentSampleMap::ImportSamples(Sample until_value) {
-  // TODO(bcwhite): This import operates in O(V+N) total time per sparse
-  // histogram where V is the number of values for this object and N is
-  // the number of other iterable objects in the allocator. This becomes
-  // O(S*(SV+N)) or O(S^2*V + SN) overall where S is the number of sparse
-  // histograms.
-  //
-  // This is actually okay when histograms are expected to exist for the
-  // lifetime of the program, spreading the cost out, and S and V are
-  // relatively small, as is the current case.
-  //
-  // However, it is not so good for objects that are created, detroyed, and
-  // recreated on a periodic basis, such as when making a snapshot of
-  // sparse histograms owned by another, ongoing process. In that case, the
-  // entire cost is compressed into a single sequential operation... on the
-  // UI thread no less.
-  //
-  // This will be addressed in a future CL.
+PersistentSampleMapRecords* PersistentSampleMap::GetRecords() {
+  // The |records_| pointer is lazily fetched from the |allocator_| only on
+  // first use. Sometimes duplicate histograms are created by race conditions
+  // and if both were to grab the records object, there would be a conflict.
+  // Use of a histogram, and thus a call to this method, won't occur until
+  // after the histogram has been de-dup'd.
+  if (!records_)
+    records_ = allocator_->UseSampleMapRecords(id(), this);
+  return records_;
+}
 
-  uint32_t type_id;
+Count* PersistentSampleMap::ImportSamples(Sample until_value,
+                                          bool import_everything) {
+  Count* found_count = nullptr;
   PersistentMemoryAllocator::Reference ref;
-  while ((ref = allocator_->GetNextIterable(&sample_iter_, &type_id)) != 0) {
-    if (type_id == kTypeIdSampleRecord) {
-      SampleRecord* record =
-          allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
-      if (!record)
-        continue;
+  PersistentSampleMapRecords* records = GetRecords();
+  while ((ref = records->GetNext()) != 0) {
+    SampleRecord* record =
+        records->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+    if (!record)
+      continue;
 
-      // A sample record has been found but may not be for this histogram.
-      if (record->id != id())
-        continue;
+    DCHECK_EQ(id(), record->id);
 
-      // Check if the record's value is already known.
-      if (!ContainsKey(sample_counts_, record->value)) {
-        // No: Add it to map of known values if the value is valid.
-        if (record->value >= 0)
-          sample_counts_[record->value] = &record->count;
-      } else {
-        // Yes: Ignore it; it's a duplicate caused by a race condition -- see
-        // code & comment in GetOrCreateSampleCountStorage() for details.
-        // Check that nothing ever operated on the duplicate record.
-        DCHECK_EQ(0, record->count);
-      }
+    // Check if the record's value is already known.
+    if (!ContainsKey(sample_counts_, record->value)) {
+      // No: Add it to the map of known values.
+      sample_counts_[record->value] = &record->count;
+    } else {
+      // Yes: Ignore it; it's a duplicate caused by a race condition -- see
+      // code & comment in GetOrCreateSampleCountStorage() for details.
+      // Check that nothing ever operated on the duplicate record.
+      DCHECK_EQ(0, record->count);
+    }
 
-      // Stop if it's the value being searched for.
-      if (record->value == until_value)
-        return &record->count;
+    // Check if it's the value being searched for and, if so, keep a pointer
+    // to return later. Stop here unless everything is being imported.
+    // Because race conditions can cause multiple records for a single value,
+    // be sure to return the first one found.
+    if (record->value == until_value) {
+      if (!found_count)
+        found_count = &record->count;
+      if (!import_everything)
+        break;
     }
   }
 
-  return nullptr;
+  return found_count;
 }
 
 }  // namespace base
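
A hypothetical caller-side sketch of the two new static helpers. The real driver lives in persistent_histogram_allocator.cc (presumably the sample-map records machinery forward-declared in the header), which is not part of this hunk, so the surrounding names below are assumptions:

    #include <stdint.h>

    #include "base/metrics/persistent_memory_allocator.h"
    #include "base/metrics/persistent_sample_map.h"

    void ScanSampleRecords(base::PersistentMemoryAllocator* memory_allocator) {
      // Hypothetical driver loop over all sample records in the allocator.
      base::PersistentMemoryAllocator::Iterator iter(memory_allocator);
      uint64_t sample_map_id = 0;
      base::PersistentMemoryAllocator::Reference ref;
      while ((ref = base::PersistentSampleMap::GetNextPersistentRecord(
                  iter, &sample_map_id)) != 0) {
        // Route |ref| to whatever object tracks records for |sample_map_id|.
      }

      // Creating a count record for sample value 42 of map id 1:
      base::PersistentSampleMap::CreatePersistentRecord(memory_allocator,
                                                        /*sample_map_id=*/1,
                                                        /*value=*/42);
    }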
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
index a23b751..3c175db 100644
--- a/base/metrics/persistent_sample_map.h
+++ b/base/metrics/persistent_sample_map.h
@@ -12,23 +12,30 @@
 #include <stdint.h>
 
 #include <map>
+#include <memory>
 
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/persistent_memory_allocator.h"
 
 namespace base {
 
+class PersistentHistogramAllocator;
+class PersistentSampleMapRecords;
+class PersistentSparseHistogramDataManager;
+
 // The logic here is similar to that of SampleMap but with different data
 // structures. Changes here likely need to be duplicated there.
 class BASE_EXPORT PersistentSampleMap : public HistogramSamples {
  public:
+  // Constructs a persistent sample map using a PersistentHistogramAllocator
+  // as the data source for persistent records.
   PersistentSampleMap(uint64_t id,
-                      PersistentMemoryAllocator* allocator,
+                      PersistentHistogramAllocator* allocator,
                       Metadata* meta);
+
   ~PersistentSampleMap() override;
 
   // HistogramSamples:
@@ -36,7 +43,21 @@
                   HistogramBase::Count count) override;
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
   HistogramBase::Count TotalCount() const override;
-  scoped_ptr<SampleCountIterator> Iterator() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
+
+  // Uses a persistent-memory |iterator| to locate and return the reference of
+  // the next record holding data for a PersistentSampleMap. The record could
+  // belong to any map, so the |sample_map_id| is returned as well.
+  static PersistentMemoryAllocator::Reference GetNextPersistentRecord(
+      PersistentMemoryAllocator::Iterator& iterator,
+      uint64_t* sample_map_id);
+
+  // Creates a new record in an |allocator| storing count information for a
+  // specific sample |value| of a histogram with the given |sample_map_id|.
+  static PersistentMemoryAllocator::Reference CreatePersistentRecord(
+      PersistentMemoryAllocator* allocator,
+      uint64_t sample_map_id,
+      HistogramBase::Sample value);
 
  protected:
   // Performs arithmetic. |op| is ADD or SUBTRACT.
@@ -52,23 +73,34 @@
       HistogramBase::Sample value);
 
  private:
-  enum : HistogramBase::Sample { kAllSamples = -1 };
+  // Gets the object that manages persistent records. This returns the
+  // |records_| member after first initializing it if necessary.
+  PersistentSampleMapRecords* GetRecords();
 
   // Imports samples from persistent memory by iterating over all sample
   // records found therein, adding them to the sample_counts_ map. If a
   // count for the sample |until_value| is found, stop the import and return
   // a pointer to that counter. If that value is not found, null will be
   // returned after all currently available samples have been loaded. Pass
-  // kAllSamples to force the importing of all available samples.
-  HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value);
+  // true for |import_everything| to force the importing of all available
+  // samples even if a match is found.
+  HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value,
+                                      bool import_everything);
 
   // All created/loaded sample values and their associated counts. The storage
-  // for the actual Count numbers is owned by the |allocator_|.
+  // for the actual Count numbers is owned by the |records_| object and its
+  // underlying allocator.
   std::map<HistogramBase::Sample, HistogramBase::Count*> sample_counts_;
 
-  // The persistent memory allocator holding samples and an iterator through it.
-  PersistentMemoryAllocator* allocator_;
-  PersistentMemoryAllocator::Iterator sample_iter_;
+  // The allocator that manages histograms inside persistent memory. This is
+  // owned externally and is expected to live beyond the life of this object.
+  PersistentHistogramAllocator* allocator_;
+
+  // The object that manages sample records inside persistent memory. This is
+  // owned by the |allocator_| object (above) and so, like it, is expected to
+  // live beyond the life of this object. This value is lazily-initialized on
+  // first use via the GetRecords() accessor method.
+  PersistentSampleMapRecords* records_ = nullptr;
 
   DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
 };
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
index c735f8f..beb72e5 100644
--- a/base/metrics/persistent_sample_map_unittest.cc
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -4,19 +4,34 @@
 
 #include "base/metrics/persistent_sample_map.h"
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
+#include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_histogram_allocator.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 namespace {
 
-TEST(PersistentSampleMapTest, AccumulateTest) {
-  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+std::unique_ptr<PersistentHistogramAllocator> CreateHistogramAllocator(
+    size_t bytes) {
+  return WrapUnique(new PersistentHistogramAllocator(
+      WrapUnique(new LocalPersistentMemoryAllocator(bytes, 0, ""))));
+}
 
-  HistogramSamples::Metadata* meta =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  PersistentSampleMap samples(1, &allocator, meta);
+std::unique_ptr<PersistentHistogramAllocator> DuplicateHistogramAllocator(
+    PersistentHistogramAllocator* original) {
+  return WrapUnique(
+      new PersistentHistogramAllocator(WrapUnique(new PersistentMemoryAllocator(
+          const_cast<void*>(original->data()), original->length(), 0,
+          original->Id(), original->Name(), false))));
+}
+
+TEST(PersistentSampleMapTest, AccumulateTest) {
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
 
   samples.Accumulate(1, 100);
   samples.Accumulate(2, 200);
@@ -30,12 +45,10 @@
 }
 
 TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
-  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
-
-  HistogramSamples::Metadata* meta =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  PersistentSampleMap samples(1, &allocator, meta);
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
 
   samples.Accumulate(250000000, 100);
   samples.Accumulate(500000000, 200);
@@ -49,21 +62,18 @@
 }
 
 TEST(PersistentSampleMapTest, AddSubtractTest) {
-  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
-
-  HistogramSamples::Metadata* meta1 =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  HistogramSamples::Metadata* meta2 =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  PersistentSampleMap samples1(1, &allocator, meta1);
-  PersistentSampleMap samples2(2, &allocator, meta2);
-
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta1;
+  PersistentSampleMap samples1(1, allocator1.get(), &meta1);
   samples1.Accumulate(1, 100);
   samples1.Accumulate(2, 100);
   samples1.Accumulate(3, 100);
 
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  HistogramSamples::Metadata meta2;
+  PersistentSampleMap samples2(2, allocator2.get(), &meta2);
   samples2.Accumulate(1, 200);
   samples2.Accumulate(2, 200);
   samples2.Accumulate(4, 200);
@@ -88,32 +98,36 @@
 }
 
 TEST(PersistentSampleMapTest, PersistenceTest) {
-  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
-
-  HistogramSamples::Metadata* meta12 =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  PersistentSampleMap samples1(12, &allocator, meta12);
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta12;
+  PersistentSampleMap samples1(12, allocator1.get(), &meta12);
   samples1.Accumulate(1, 100);
   samples1.Accumulate(2, 200);
   samples1.Accumulate(1, -200);
+  samples1.Accumulate(-1, 1);
   EXPECT_EQ(-100, samples1.GetCount(1));
   EXPECT_EQ(200, samples1.GetCount(2));
-  EXPECT_EQ(300, samples1.sum());
-  EXPECT_EQ(100, samples1.TotalCount());
+  EXPECT_EQ(1, samples1.GetCount(-1));
+  EXPECT_EQ(299, samples1.sum());
+  EXPECT_EQ(101, samples1.TotalCount());
   EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
 
-  PersistentSampleMap samples2(12, &allocator, meta12);
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  PersistentSampleMap samples2(12, allocator2.get(), &meta12);
   EXPECT_EQ(samples1.id(), samples2.id());
   EXPECT_EQ(samples1.sum(), samples2.sum());
   EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
   EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
   EXPECT_EQ(-100, samples2.GetCount(1));
   EXPECT_EQ(200, samples2.GetCount(2));
-  EXPECT_EQ(300, samples2.sum());
-  EXPECT_EQ(100, samples2.TotalCount());
+  EXPECT_EQ(1, samples2.GetCount(-1));
+  EXPECT_EQ(299, samples2.sum());
+  EXPECT_EQ(101, samples2.TotalCount());
   EXPECT_EQ(samples2.redundant_count(), samples2.TotalCount());
 
+  samples1.Accumulate(-1, -1);
   EXPECT_EQ(0, samples2.GetCount(3));
   EXPECT_EQ(0, samples1.GetCount(3));
   samples2.Accumulate(3, 300);
@@ -122,21 +136,31 @@
   EXPECT_EQ(samples1.sum(), samples2.sum());
   EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
   EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+
+  EXPECT_EQ(0, samples2.GetCount(4));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  samples1.Accumulate(4, 400);
+  EXPECT_EQ(400, samples2.GetCount(4));
+  EXPECT_EQ(400, samples1.GetCount(4));
+  samples2.Accumulate(4, 4000);
+  EXPECT_EQ(4400, samples2.GetCount(4));
+  EXPECT_EQ(4400, samples1.GetCount(4));
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
 }
 
 TEST(PersistentSampleMapIteratorTest, IterateTest) {
-  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
-
-  HistogramSamples::Metadata* meta =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  PersistentSampleMap samples(1, &allocator, meta);
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
   samples.Accumulate(1, 100);
   samples.Accumulate(2, 200);
   samples.Accumulate(4, -300);
   samples.Accumulate(5, 0);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   HistogramBase::Sample min;
   HistogramBase::Sample max;
@@ -165,29 +189,27 @@
 }
 
 TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
-  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+  std::unique_ptr<PersistentHistogramAllocator> allocator1 =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta1;
+  PersistentSampleMap samples1(1, allocator1.get(), &meta1);
+  samples1.Accumulate(5, 1);
+  samples1.Accumulate(10, 2);
+  samples1.Accumulate(15, 3);
+  samples1.Accumulate(20, 4);
+  samples1.Accumulate(25, 5);
 
-  HistogramSamples::Metadata* meta =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  PersistentSampleMap samples(1, &allocator, meta);
-  samples.Accumulate(5, 1);
-  samples.Accumulate(10, 2);
-  samples.Accumulate(15, 3);
-  samples.Accumulate(20, 4);
-  samples.Accumulate(25, 5);
-
-  HistogramSamples::Metadata* meta2 =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  PersistentSampleMap samples2(2, &allocator, meta2);
+  std::unique_ptr<PersistentHistogramAllocator> allocator2 =
+      DuplicateHistogramAllocator(allocator1.get());
+  HistogramSamples::Metadata meta2;
+  PersistentSampleMap samples2(2, allocator2.get(), &meta2);
   samples2.Accumulate(5, 1);
   samples2.Accumulate(20, 4);
   samples2.Accumulate(25, 5);
 
-  samples.Subtract(samples2);
+  samples1.Subtract(samples2);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples1.Iterator();
   EXPECT_FALSE(it->Done());
 
   HistogramBase::Sample min;
@@ -214,14 +236,12 @@
 // Only run this test on builds that support catching a DCHECK crash.
 #if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
 TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
-  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+  std::unique_ptr<PersistentHistogramAllocator> allocator =
+      CreateHistogramAllocator(64 << 10);  // 64 KiB
+  HistogramSamples::Metadata meta;
+  PersistentSampleMap samples(1, allocator.get(), &meta);
 
-  HistogramSamples::Metadata* meta =
-      allocator.GetAsObject<HistogramSamples::Metadata>(
-          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
-  PersistentSampleMap samples(1, &allocator, meta);
-
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   EXPECT_TRUE(it->Done());
 
diff --git a/base/metrics/sample_map.cc b/base/metrics/sample_map.cc
index 21a4e35..8abd01e 100644
--- a/base/metrics/sample_map.cc
+++ b/base/metrics/sample_map.cc
@@ -5,6 +5,7 @@
 #include "base/metrics/sample_map.h"
 
 #include "base/logging.h"
+#include "base/memory/ptr_util.h"
 #include "base/stl_util.h"
 
 namespace base {
@@ -102,8 +103,8 @@
   return count;
 }
 
-scoped_ptr<SampleCountIterator> SampleMap::Iterator() const {
-  return make_scoped_ptr(new SampleMapIterator(sample_counts_));
+std::unique_ptr<SampleCountIterator> SampleMap::Iterator() const {
+  return WrapUnique(new SampleMapIterator(sample_counts_));
 }
 
 bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
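
The Iterator() change above is representative of the commit-wide migration: scoped_ptr<T> becomes std::unique_ptr<T>, and make_scoped_ptr(new T(...)) becomes WrapUnique(new T(...)) from base/memory/ptr_util.h. A minimal sketch of the pattern (Widget is a placeholder type, not something from the tree):

    #include <memory>

    #include "base/memory/ptr_util.h"

    struct Widget {};  // Placeholder type, for illustration only.

    std::unique_ptr<Widget> MakeWidget() {
      // Before this commit: return make_scoped_ptr(new Widget());
      return base::WrapUnique(new Widget());
    }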
diff --git a/base/metrics/sample_map.h b/base/metrics/sample_map.h
index 2f24e1f..7458e05 100644
--- a/base/metrics/sample_map.h
+++ b/base/metrics/sample_map.h
@@ -11,10 +11,10 @@
 #include <stdint.h>
 
 #include <map>
+#include <memory>
 
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 
@@ -33,7 +33,7 @@
                   HistogramBase::Count count) override;
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
   HistogramBase::Count TotalCount() const override;
-  scoped_ptr<SampleCountIterator> Iterator() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
 
  protected:
   // Performs arithmetic. |op| is ADD or SUBTRACT.
diff --git a/base/metrics/sample_map_unittest.cc b/base/metrics/sample_map_unittest.cc
index 3626bd0..8f57710 100644
--- a/base/metrics/sample_map_unittest.cc
+++ b/base/metrics/sample_map_unittest.cc
@@ -4,7 +4,8 @@
 
 #include "base/metrics/sample_map.h"
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -76,7 +77,7 @@
   samples.Accumulate(4, -300);
   samples.Accumulate(5, 0);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   HistogramBase::Sample min;
   HistogramBase::Sample max;
@@ -119,7 +120,7 @@
 
   samples.Subtract(samples2);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
   EXPECT_FALSE(it->Done());
 
   HistogramBase::Sample min;
@@ -148,7 +149,7 @@
 TEST(SampleMapIteratorDeathTest, IterateDoneTest) {
   SampleMap samples(1);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   EXPECT_TRUE(it->Done());
 
diff --git a/base/metrics/sample_vector.cc b/base/metrics/sample_vector.cc
index e1603d9..7b056cb 100644
--- a/base/metrics/sample_vector.cc
+++ b/base/metrics/sample_vector.cc
@@ -26,7 +26,7 @@
 
 SampleVector::SampleVector(uint64_t id,
                            HistogramBase::AtomicCount* counts,
-                           size_t /* counts_size */,
+                           size_t /*counts_size*/,
                            Metadata* meta,
                            const BucketRanges* bucket_ranges)
     : HistogramSamples(id, meta),
@@ -41,8 +41,7 @@
 
 void SampleVector::Accumulate(Sample value, Count count) {
   size_t bucket_index = GetBucketIndex(value);
-  subtle::NoBarrier_Store(&counts_[bucket_index],
-      subtle::NoBarrier_Load(&counts_[bucket_index]) + count);
+  subtle::NoBarrier_AtomicIncrement(&counts_[bucket_index], count);
   IncreaseSum(static_cast<int64_t>(count) * value);
   IncreaseRedundantCount(count);
 }
@@ -65,8 +64,8 @@
   return subtle::NoBarrier_Load(&counts_[bucket_index]);
 }
 
-scoped_ptr<SampleCountIterator> SampleVector::Iterator() const {
-  return scoped_ptr<SampleCountIterator>(
+std::unique_ptr<SampleCountIterator> SampleVector::Iterator() const {
+  return std::unique_ptr<SampleCountIterator>(
       new SampleVectorIterator(counts_, counts_size_, bucket_ranges_));
 }
 
@@ -83,10 +82,8 @@
     if (min == bucket_ranges_->range(index) &&
         max == bucket_ranges_->range(index + 1)) {
       // Sample matches this bucket!
-      HistogramBase::Count old_counts =
-          subtle::NoBarrier_Load(&counts_[index]);
-      subtle::NoBarrier_Store(&counts_[index],
-          old_counts + ((op ==  HistogramSamples::ADD) ? count : -count));
+      subtle::NoBarrier_AtomicIncrement(
+          &counts_[index], op == HistogramSamples::ADD ? count : -count);
       iter->Next();
     } else if (min > bucket_ranges_->range(index)) {
       // Sample is larger than current bucket range. Try next.
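
The Accumulate() change replaces a NoBarrier_Load()/NoBarrier_Store() pair with NoBarrier_AtomicIncrement(), turning the bucket update into a single read-modify-write so concurrent increments cannot overwrite each other. The same distinction, sketched with std::atomic purely for illustration (the real code uses base::subtle atomics):

    #include <atomic>

    std::atomic<int> bucket_count{0};

    // Lossy under concurrency: two threads can read the same old value and
    // both write old + n, dropping one of the increments.
    void AccumulateRacy(int n) {
      int old = bucket_count.load(std::memory_order_relaxed);
      bucket_count.store(old + n, std::memory_order_relaxed);
    }

    // Atomic read-modify-write: every increment is applied exactly once.
    void AccumulateAtomic(int n) {
      bucket_count.fetch_add(n, std::memory_order_relaxed);
    }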
diff --git a/base/metrics/sample_vector.h b/base/metrics/sample_vector.h
index 86319ea..ee26c52 100644
--- a/base/metrics/sample_vector.h
+++ b/base/metrics/sample_vector.h
@@ -11,12 +11,12 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/compiler_specific.h"
 #include "base/gtest_prod_util.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 
@@ -40,7 +40,7 @@
                   HistogramBase::Count count) override;
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
   HistogramBase::Count TotalCount() const override;
-  scoped_ptr<SampleCountIterator> Iterator() const override;
+  std::unique_ptr<SampleCountIterator> Iterator() const override;
 
   // Get count of a specific bucket.
   HistogramBase::Count GetCountAtIndex(size_t bucket_index) const;
diff --git a/base/metrics/sample_vector_unittest.cc b/base/metrics/sample_vector_unittest.cc
index 434def7..02e48aa 100644
--- a/base/metrics/sample_vector_unittest.cc
+++ b/base/metrics/sample_vector_unittest.cc
@@ -7,9 +7,9 @@
 #include <limits.h>
 #include <stddef.h>
 
+#include <memory>
 #include <vector>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -244,7 +244,7 @@
   samples.Accumulate(1, 1);
   samples.Accumulate(2, 2);
   samples.Accumulate(3, 3);
-  scoped_ptr<SampleCountIterator> it2 = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it2 = samples.Iterator();
 
   int i;
   for (i = 1; !it2->Done(); i++, it2->Next()) {
@@ -271,7 +271,7 @@
   ranges.set_range(4, INT_MAX);
   SampleVector samples(1, &ranges);
 
-  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   EXPECT_TRUE(it->Done());
 
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index 491fff0..4b7085a 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -6,6 +6,7 @@
 
 #include <utility>
 
+#include "base/memory/ptr_util.h"
 #include "base/metrics/metrics_hashes.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/persistent_sample_map.h"
@@ -23,12 +24,6 @@
 // static
 HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
                                            int32_t flags) {
-  // Import histograms from known persistent storage. Histograms could have
-  // been added by other processes and they must be fetched and recognized
-  // locally in order to be found by FindHistograms() below. If the persistent
-  // memory segment is not shared between processes, this call does nothing.
-  PersistentHistogramAllocator::ImportGlobalHistograms();
-
   HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
   if (!histogram) {
     // Try to create the histogram using a "persistent" allocator. As of
@@ -37,9 +32,8 @@
     // allocating from it fails, code below will allocate the histogram from
     // the process heap.
     PersistentMemoryAllocator::Reference histogram_ref = 0;
-    scoped_ptr<HistogramBase> tentative_histogram;
-    PersistentHistogramAllocator* allocator =
-        PersistentHistogramAllocator::GetGlobalAllocator();
+    std::unique_ptr<HistogramBase> tentative_histogram;
+    PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
     if (allocator) {
       tentative_histogram = allocator->AllocateHistogram(
           SPARSE_HISTOGRAM, name, 0, 0, nullptr, flags, &histogram_ref);
@@ -79,12 +73,12 @@
 }
 
 // static
-scoped_ptr<HistogramBase> SparseHistogram::PersistentCreate(
-    PersistentMemoryAllocator* allocator,
+std::unique_ptr<HistogramBase> SparseHistogram::PersistentCreate(
+    PersistentHistogramAllocator* allocator,
     const std::string& name,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta) {
-  return make_scoped_ptr(
+  return WrapUnique(
       new SparseHistogram(allocator, name, meta, logged_meta));
 }
 
@@ -99,9 +93,9 @@
 }
 
 bool SparseHistogram::HasConstructionArguments(
-    Sample /* expected_minimum */,
-    Sample /* expected_maximum */,
-    uint32_t /* expected_bucket_count */) const {
+    Sample /*expected_minimum*/,
+    Sample /*expected_maximum*/,
+    uint32_t /*expected_bucket_count*/) const {
   // SparseHistogram never has min/max/bucket_count limit.
   return false;
 }
@@ -123,16 +117,18 @@
   FindAndRunCallback(value);
 }
 
-scoped_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
-  scoped_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
 
   base::AutoLock auto_lock(lock_);
   snapshot->Add(*samples_);
   return std::move(snapshot);
 }
 
-scoped_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
-  scoped_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
+  DCHECK(!final_delta_created_);
+
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
   base::AutoLock auto_lock(lock_);
   snapshot->Add(*samples_);
 
@@ -142,6 +138,19 @@
   return std::move(snapshot);
 }
 
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotFinalDelta() const {
+  DCHECK(!final_delta_created_);
+  final_delta_created_ = true;
+
+  std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+  base::AutoLock auto_lock(lock_);
+  snapshot->Add(*samples_);
+
+  // Subtract what was previously logged and then return.
+  snapshot->Subtract(*logged_samples_);
+  return std::move(snapshot);
+}
+
 void SparseHistogram::AddSamples(const HistogramSamples& samples) {
   base::AutoLock auto_lock(lock_);
   samples_->Add(samples);
@@ -171,7 +180,7 @@
       samples_(new SampleMap(HashMetricName(name))),
       logged_samples_(new SampleMap(samples_->id())) {}
 
-SparseHistogram::SparseHistogram(PersistentMemoryAllocator* allocator,
+SparseHistogram::SparseHistogram(PersistentHistogramAllocator* allocator,
                                  const std::string& name,
                                  HistogramSamples::Metadata* meta,
                                  HistogramSamples::Metadata* logged_meta)
@@ -205,13 +214,13 @@
   return SparseHistogram::FactoryGet(histogram_name, flags);
 }
 
-void SparseHistogram::GetParameters(DictionaryValue* /* params */) const {
+void SparseHistogram::GetParameters(DictionaryValue* /*params*/) const {
   // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
 }
 
-void SparseHistogram::GetCountAndBucketData(Count* /* count */,
-                                            int64_t* /* sum */,
-                                            ListValue* /* buckets */) const {
+void SparseHistogram::GetCountAndBucketData(Count* /*count*/,
+                                            int64_t* /*sum*/,
+                                            ListValue* /*buckets*/) const {
   // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
 }
 
@@ -219,7 +228,7 @@
                                      const std::string& newline,
                                      std::string* output) const {
   // Get a local copy of the data so we are consistent.
-  scoped_ptr<HistogramSamples> snapshot = SnapshotSamples();
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
   Count total_count = snapshot->TotalCount();
   double scaled_total_count = total_count / 100.0;
 
@@ -232,7 +241,7 @@
   // normalize the graphical bar-width relative to that sample count.
   Count largest_count = 0;
   Sample largest_sample = 0;
-  scoped_ptr<SampleCountIterator> it = snapshot->Iterator();
+  std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
   while (!it->Done()) {
     Sample min;
     Sample max;
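
SnapshotDelta() and the new SnapshotFinalDelta() follow the same idiom: copy the live samples, subtract what was previously logged, and hand back the difference; SnapshotFinalDelta() additionally flags the histogram so no further deltas are taken (DCHECKed above). A simplified sketch of that idiom using plain integers rather than the HistogramSamples API; the fold-back into the logged samples falls between hunks here and is assumed:

    // Toy model, for illustration only: 'live' stands in for |samples_| and
    // 'logged' for |logged_samples_|.
    struct DeltaModel {
      int live = 0;
      int logged = 0;
      bool final_delta_created = false;

      int SnapshotDelta() {
        int delta = live - logged;  // copy live samples, subtract logged ones
        logged = live;              // assumed: remember what has been reported
        return delta;
      }

      int SnapshotFinalDelta() {
        final_delta_created = true;  // no further deltas may be taken
        return live - logged;        // computed only; not folded back into logged
      }
    };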
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
index b876737..3b302d6 100644
--- a/base/metrics/sparse_histogram.h
+++ b/base/metrics/sparse_histogram.h
@@ -9,13 +9,13 @@
 #include <stdint.h>
 
 #include <map>
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/sample_map.h"
 #include "base/synchronization/lock.h"
@@ -51,7 +51,7 @@
     } while (0)
 
 class HistogramSamples;
-class PersistentMemoryAllocator;
+class PersistentHistogramAllocator;
 
 class BASE_EXPORT SparseHistogram : public HistogramBase {
  public:
@@ -59,9 +59,10 @@
   // new one.
   static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
 
-  // Create a histogram using data in persistent storage.
-  static scoped_ptr<HistogramBase> PersistentCreate(
-      PersistentMemoryAllocator* allocator,
+  // Create a histogram using data in persistent storage. The allocator must
+  // live longer than the created sparse histogram.
+  static std::unique_ptr<HistogramBase> PersistentCreate(
+      PersistentHistogramAllocator* allocator,
       const std::string& name,
       HistogramSamples::Metadata* meta,
       HistogramSamples::Metadata* logged_meta);
@@ -78,8 +79,9 @@
   void AddCount(Sample value, int count) override;
   void AddSamples(const HistogramSamples& samples) override;
   bool AddSamplesFromPickle(base::PickleIterator* iter) override;
-  scoped_ptr<HistogramSamples> SnapshotSamples() const override;
-  scoped_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotDelta() override;
+  std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
   void WriteHTMLGraph(std::string* output) const override;
   void WriteAscii(std::string* output) const override;
 
@@ -91,7 +93,7 @@
   // Clients should always use FactoryGet to create SparseHistogram.
   explicit SparseHistogram(const std::string& name);
 
-  SparseHistogram(PersistentMemoryAllocator* allocator,
+  SparseHistogram(PersistentHistogramAllocator* allocator,
                   const std::string& name,
                   HistogramSamples::Metadata* meta,
                   HistogramSamples::Metadata* logged_meta);
@@ -120,8 +122,11 @@
   // Protects access to |samples_|.
   mutable base::Lock lock_;
 
-  scoped_ptr<HistogramSamples> samples_;
-  scoped_ptr<HistogramSamples> logged_samples_;
+  // Flag to indicate if PrepareFinalDelta has been previously called.
+  mutable bool final_delta_created_ = false;
+
+  std::unique_ptr<HistogramSamples> samples_;
+  std::unique_ptr<HistogramSamples> logged_samples_;
 
   DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
 };
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index 5d5dbcb..fbff977 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -4,9 +4,9 @@
 
 #include "base/metrics/sparse_histogram.h"
 
+#include <memory>
 #include <string>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/persistent_histogram_allocator.h"
@@ -47,40 +47,37 @@
   }
 
   void InitializeStatisticsRecorder() {
-    StatisticsRecorder::ResetForTesting();
-    statistics_recorder_ = new StatisticsRecorder();
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_.reset(new StatisticsRecorder());
   }
 
   void UninitializeStatisticsRecorder() {
-    delete statistics_recorder_;
-    statistics_recorder_ = NULL;
+    statistics_recorder_.reset();
   }
 
   void CreatePersistentMemoryAllocator() {
     // By getting the results-histogram before any persistent allocator
     // is attached, that histogram is guaranteed not to be stored in
     // any persistent memory segment (which simplifies some tests).
-    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+    GlobalHistogramAllocator::GetCreateHistogramResultHistogram();
 
-    PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+    GlobalHistogramAllocator::CreateWithLocalMemory(
         kAllocatorMemorySize, 0, "SparseHistogramAllocatorTest");
-    allocator_ =
-        PersistentHistogramAllocator::GetGlobalAllocator()->memory_allocator();
+    allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
   }
 
   void DestroyPersistentMemoryAllocator() {
     allocator_ = nullptr;
-    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+    GlobalHistogramAllocator::ReleaseForTesting();
   }
 
-  scoped_ptr<SparseHistogram> NewSparseHistogram(const std::string& name) {
-    return scoped_ptr<SparseHistogram>(new SparseHistogram(name));
+  std::unique_ptr<SparseHistogram> NewSparseHistogram(const std::string& name) {
+    return std::unique_ptr<SparseHistogram>(new SparseHistogram(name));
   }
 
   const bool use_persistent_histogram_allocator_;
 
-  StatisticsRecorder* statistics_recorder_;
-  scoped_ptr<char[]> allocator_memory_;
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
   PersistentMemoryAllocator* allocator_ = nullptr;
 
  private:
@@ -94,57 +91,57 @@
 
 
 TEST_P(SparseHistogramTest, BasicTest) {
-  scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
-  scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
   EXPECT_EQ(0, snapshot->TotalCount());
   EXPECT_EQ(0, snapshot->sum());
 
   histogram->Add(100);
-  scoped_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
   EXPECT_EQ(1, snapshot1->TotalCount());
   EXPECT_EQ(1, snapshot1->GetCount(100));
 
   histogram->Add(100);
   histogram->Add(101);
-  scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
   EXPECT_EQ(3, snapshot2->TotalCount());
   EXPECT_EQ(2, snapshot2->GetCount(100));
   EXPECT_EQ(1, snapshot2->GetCount(101));
 }
 
 TEST_P(SparseHistogramTest, BasicTestAddCount) {
-  scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
-  scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
   EXPECT_EQ(0, snapshot->TotalCount());
   EXPECT_EQ(0, snapshot->sum());
 
   histogram->AddCount(100, 15);
-  scoped_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
   EXPECT_EQ(15, snapshot1->TotalCount());
   EXPECT_EQ(15, snapshot1->GetCount(100));
 
   histogram->AddCount(100, 15);
   histogram->AddCount(101, 25);
-  scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
   EXPECT_EQ(55, snapshot2->TotalCount());
   EXPECT_EQ(30, snapshot2->GetCount(100));
   EXPECT_EQ(25, snapshot2->GetCount(101));
 }
 
 TEST_P(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
-  scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
-  scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
   EXPECT_EQ(0, snapshot->TotalCount());
   EXPECT_EQ(0, snapshot->sum());
 
   histogram->AddCount(1000000000, 15);
-  scoped_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
   EXPECT_EQ(15, snapshot1->TotalCount());
   EXPECT_EQ(15, snapshot1->GetCount(1000000000));
 
   histogram->AddCount(1000000000, 15);
   histogram->AddCount(1010000000, 25);
-  scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  std::unique_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
   EXPECT_EQ(55, snapshot2->TotalCount());
   EXPECT_EQ(30, snapshot2->GetCount(1000000000));
   EXPECT_EQ(25, snapshot2->GetCount(1010000000));
@@ -170,7 +167,8 @@
                                                : 0),
       sparse_histogram->flags());
 
-  scoped_ptr<HistogramSamples> samples = sparse_histogram->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> samples =
+      sparse_histogram->SnapshotSamples();
   EXPECT_EQ(3, samples->TotalCount());
   EXPECT_EQ(2, samples->GetCount(100));
   EXPECT_EQ(1, samples->GetCount(200));
@@ -195,7 +193,7 @@
 }
 
 TEST_P(SparseHistogramTest, Serialize) {
-  scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
   histogram->SetFlags(HistogramBase::kIPCSerializationSourceFlag);
 
   Pickle pickle;
@@ -219,6 +217,55 @@
   EXPECT_FALSE(iter.SkipBytes(1));
 }
 
+// Ensure that race conditions that cause multiple, identical sparse histograms
+// to be created will safely resolve to a single one.
+TEST_P(SparseHistogramTest, DuplicationSafety) {
+  const char histogram_name[] = "Duplicated";
+  size_t histogram_count = StatisticsRecorder::GetHistogramCount();
+
+  // Create a histogram that we will later duplicate.
+  HistogramBase* original =
+      SparseHistogram::FactoryGet(histogram_name, HistogramBase::kNoFlags);
+  ++histogram_count;
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+  original->Add(1);
+
+  // Create a duplicate. This has to happen differently depending on where the
+  // memory is taken from.
+  if (use_persistent_histogram_allocator_) {
+    // To allocate from persistent memory, clear the last_created reference in
+    // the GlobalHistogramAllocator. This will cause an Import to recreate
+    // the just-created histogram which will then be released as a duplicate.
+    GlobalHistogramAllocator::Get()->ClearLastCreatedReferenceForTesting();
+    // Creating a different histogram will first do an Import to ensure it
+    // hasn't been created elsewhere, triggering the duplication and release.
+    SparseHistogram::FactoryGet("something.new", HistogramBase::kNoFlags);
+    ++histogram_count;
+  } else {
+    // To allocate from the heap, just call the (private) constructor directly.
+    // Delete it immediately, as would have happened within FactoryGet().
+    std::unique_ptr<SparseHistogram> something =
+        NewSparseHistogram(histogram_name);
+    DCHECK_NE(original, something.get());
+  }
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+
+  // Re-creating the histogram via FactoryGet() will return the same one.
+  HistogramBase* duplicate =
+      SparseHistogram::FactoryGet(histogram_name, HistogramBase::kNoFlags);
+  DCHECK_EQ(original, duplicate);
+  DCHECK_EQ(histogram_count, StatisticsRecorder::GetHistogramCount());
+  duplicate->Add(2);
+
+  // Ensure that original histograms are still cross-functional.
+  original->Add(2);
+  duplicate->Add(1);
+  std::unique_ptr<HistogramSamples> snapshot_orig = original->SnapshotSamples();
+  std::unique_ptr<HistogramSamples> snapshot_dup = duplicate->SnapshotSamples();
+  DCHECK_EQ(2, snapshot_orig->GetCount(2));
+  DCHECK_EQ(2, snapshot_dup->GetCount(1));
+}
+
 TEST_P(SparseHistogramTest, FactoryTime) {
   const int kTestCreateCount = 1 << 10;  // Must be power-of-2.
   const int kTestLookupCount = 100000;
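The DuplicationSafety test above leans on FactoryGet() returning the already-registered object when called again with the same name. A minimal usage sketch outside the test harness (illustrative histogram name, not part of this patch; only calls that appear in the diff are used):

#include "base/metrics/histogram_base.h"
#include "base/metrics/sparse_histogram.h"

// Records |code| into a sparse histogram. Calling FactoryGet() again with the
// same name returns the same registered HistogramBase, never a duplicate.
void RecordErrorCode(int code) {
  base::HistogramBase* histogram = base::SparseHistogram::FactoryGet(
      "MyModule.ErrorCode", base::HistogramBase::kNoFlags);
  histogram->Add(code);
}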
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index 6156e72..cad3fd0 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -4,12 +4,15 @@
 
 #include "base/metrics/statistics_recorder.h"
 
+#include <memory>
+
 #include "base/at_exit.h"
+#include "base/debug/leak_annotations.h"
 #include "base/json/string_escape.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/lock.h"
@@ -34,6 +37,14 @@
     const HistogramMap::iterator& iter, bool include_persistent)
     : iter_(iter),
       include_persistent_(include_persistent) {
+  // The starting location could point to a persistent histogram when such
+  // is not wanted. If so, skip it.
+  if (!include_persistent_ && iter_ != histograms_->end() &&
+      (iter_->second->flags() & HistogramBase::kIsPersistent)) {
+    // This operator will continue to skip until a non-persistent histogram
+    // is found.
+    operator++();
+  }
 }
 
 StatisticsRecorder::HistogramIterator::HistogramIterator(
@@ -71,8 +82,12 @@
   DCHECK(histograms_);
   DCHECK(ranges_);
 
-  // Global clean up.
+  // Clean out what this object created and then restore what existed before.
   Reset();
+  base::AutoLock auto_lock(*lock_);
+  histograms_ = existing_histograms_.release();
+  callbacks_ = existing_callbacks_.release();
+  ranges_ = existing_ranges_.release();
 }
 
 // static
@@ -92,8 +107,13 @@
 // static
 HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
     HistogramBase* histogram) {
-  // As per crbug.com/79322 the histograms are intentionally leaked.
+  // As per crbug.com/79322 the histograms are intentionally leaked, so we need
+  // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
+  // for an object, the duplicates should not be annotated.
+  // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
+  // twice if (lock_ == NULL) || (!histograms_).
   if (lock_ == NULL) {
+    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
     return histogram;
   }
 
@@ -105,11 +125,12 @@
       histogram_to_return = histogram;
     } else {
       const std::string& name = histogram->histogram_name();
-      const uint64_t name_hash = histogram->name_hash();
-      DCHECK_NE(0U, name_hash);
-      HistogramMap::iterator it = histograms_->find(name_hash);
+      HistogramMap::iterator it = histograms_->find(name);
       if (histograms_->end() == it) {
-        (*histograms_)[name_hash] = histogram;
+        // The StringKey references the name within |histogram| rather than
+        // making a copy.
+        (*histograms_)[name] = histogram;
+        ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
         // If there are callbacks for this histogram, we set the kCallbackExists
         // flag.
         auto callback_iterator = callbacks_->find(name);
@@ -140,14 +161,16 @@
 const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
     const BucketRanges* ranges) {
   DCHECK(ranges->HasValidChecksum());
-  scoped_ptr<const BucketRanges> ranges_deleter;
+  std::unique_ptr<const BucketRanges> ranges_deleter;
 
   if (lock_ == NULL) {
+    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
     return ranges;
   }
 
   base::AutoLock auto_lock(*lock_);
   if (ranges_ == NULL) {
+    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
     return ranges;
   }
 
@@ -156,6 +179,7 @@
   if (ranges_->end() == ranges_it) {
     // Add a new matching list to map.
     checksum_matching_list = new std::list<const BucketRanges*>();
+    ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
     (*ranges_)[ranges->checksum()] = checksum_matching_list;
   } else {
     checksum_matching_list = ranges_it->second;
@@ -249,7 +273,6 @@
     return;
 
   for (const auto& entry : *histograms_) {
-    DCHECK_EQ(entry.first, entry.second->name_hash());
     output->push_back(entry.second);
   }
 }
@@ -272,28 +295,45 @@
 
 // static
 HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
+  // This must be called *before* the lock is acquired below because it will
+  // call back into this object to register histograms. Those called methods
+  // will acquire the lock at that time.
+  ImportGlobalPersistentHistograms();
+
   if (lock_ == NULL)
     return NULL;
   base::AutoLock auto_lock(*lock_);
   if (histograms_ == NULL)
     return NULL;
 
-  HistogramMap::iterator it = histograms_->find(HashMetricName(name));
+  HistogramMap::iterator it = histograms_->find(name);
   if (histograms_->end() == it)
     return NULL;
-  DCHECK_EQ(name, it->second->histogram_name()) << "hash collision";
   return it->second;
 }
 
 // static
 StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
     bool include_persistent) {
-  return HistogramIterator(histograms_->begin(), include_persistent);
+  DCHECK(histograms_);
+  ImportGlobalPersistentHistograms();
+
+  HistogramMap::iterator iter_begin;
+  {
+    base::AutoLock auto_lock(*lock_);
+    iter_begin = histograms_->begin();
+  }
+  return HistogramIterator(iter_begin, include_persistent);
 }
 
 // static
 StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
-  return HistogramIterator(histograms_->end(), true);
+  HistogramMap::iterator iter_end;
+  {
+    base::AutoLock auto_lock(*lock_);
+    iter_end = histograms_->end();
+  }
+  return HistogramIterator(iter_end, true);
 }
 
 // static
@@ -326,11 +366,9 @@
     return false;
   callbacks_->insert(std::make_pair(name, cb));
 
-  auto it = histograms_->find(HashMetricName(name));
-  if (it != histograms_->end()) {
-    DCHECK_EQ(name, it->second->histogram_name()) << "hash collision";
+  auto it = histograms_->find(name);
+  if (it != histograms_->end())
     it->second->SetFlags(HistogramBase::kCallbackExists);
-  }
 
   return true;
 }
@@ -346,11 +384,9 @@
   callbacks_->erase(name);
 
   // We also clear the flag from the histogram (if it exists).
-  auto it = histograms_->find(HashMetricName(name));
-  if (it != histograms_->end()) {
-    DCHECK_EQ(name, it->second->histogram_name()) << "hash collision";
+  auto it = histograms_->find(name);
+  if (it != histograms_->end())
     it->second->ClearFlags(HistogramBase::kCallbackExists);
-  }
 }
 
 // static
@@ -379,22 +415,46 @@
 }
 
 // static
-void StatisticsRecorder::ResetForTesting() {
-  // Just call the private version that is used also by the destructor.
-  Reset();
+void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
+  if (histograms_)
+    histograms_->erase(name);
 }
 
 // static
-void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
-  if (histograms_)
-    histograms_->erase(HashMetricName(name.as_string()));
+void StatisticsRecorder::UninitializeForTesting() {
+  // Stop now if it's never been initialized.
+  if (lock_ == NULL || histograms_ == NULL)
+    return;
+
+  // Get the global instance and destruct it. It's held in static memory so
+  // can't "delete" it; call the destructor explicitly.
+  DCHECK(g_statistics_recorder_.private_instance_);
+  g_statistics_recorder_.Get().~StatisticsRecorder();
+
+  // Now the ugly part. There's no official way to release a LazyInstance once
+  // created so it's necessary to clear out an internal variable which
+  // shouldn't be publicly visible but is for initialization reasons.
+  g_statistics_recorder_.private_instance_ = 0;
+}
+
+// static
+void StatisticsRecorder::ImportGlobalPersistentHistograms() {
+  if (lock_ == NULL)
+    return;
+
+  // Import histograms from known persistent storage. Histograms could have
+  // been added by other processes and they must be fetched and recognized
+  // locally. If the persistent memory segment is not shared between processes,
+  // this call does nothing.
+  GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+  if (allocator)
+    allocator->ImportHistogramsToStatisticsRecorder();
 }
 
 // This singleton instance should be started during the single threaded portion
 // of main(), and hence it is not thread safe.  It initializes globals to
 // provide support for all future calls.
 StatisticsRecorder::StatisticsRecorder() {
-  DCHECK(!histograms_);
   if (lock_ == NULL) {
     // This will leak on purpose. It's the only way to make sure we won't race
     // against the static uninitialization of the module while one of our
@@ -404,7 +464,13 @@
     // during static initialization and released only on  process termination.
     lock_ = new base::Lock;
   }
+
   base::AutoLock auto_lock(*lock_);
+
+  existing_histograms_.reset(histograms_);
+  existing_callbacks_.reset(callbacks_);
+  existing_ranges_.reset(ranges_);
+
   histograms_ = new HistogramMap;
   callbacks_ = new CallbackMap;
   ranges_ = new RangesMap;
@@ -419,9 +485,9 @@
   if (!lock_)
     return;
 
-  scoped_ptr<HistogramMap> histograms_deleter;
-  scoped_ptr<CallbackMap> callbacks_deleter;
-  scoped_ptr<RangesMap> ranges_deleter;
+  std::unique_ptr<HistogramMap> histograms_deleter;
+  std::unique_ptr<CallbackMap> callbacks_deleter;
+  std::unique_ptr<RangesMap> ranges_deleter;
   // We don't delete lock_ on purpose to avoid having to properly protect
   // against it going away after we checked for NULL in the static methods.
   {
@@ -437,7 +503,7 @@
 }
 
 // static
-void StatisticsRecorder::DumpHistogramsToVlog(void* /* instance */) {
+void StatisticsRecorder::DumpHistogramsToVlog(void* /*instance*/) {
   std::string output;
   StatisticsRecorder::WriteGraph(std::string(), &output);
   VLOG(1) << output;
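Since the registry is now keyed by name and FindHistogram() calls ImportGlobalPersistentHistograms() before taking the lock, histograms created in shared persistent memory by another process are found with a plain name lookup. A short sketch (hypothetical helper, not part of this patch):

#include <memory>
#include <string>

#include "base/logging.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/statistics_recorder.h"

// Logs how many samples a histogram has accumulated, wherever it was created.
void LogSampleCount(const std::string& name) {
  base::HistogramBase* histogram =
      base::StatisticsRecorder::FindHistogram(name);  // imports, then looks up
  if (!histogram)
    return;
  std::unique_ptr<base::HistogramSamples> samples = histogram->SnapshotSamples();
  VLOG(1) << name << ": " << samples->TotalCount() << " samples";
}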
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
index 6eaf079..6c436c2 100644
--- a/base/metrics/statistics_recorder.h
+++ b/base/metrics/statistics_recorder.h
@@ -14,6 +14,7 @@
 
 #include <list>
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -32,7 +33,36 @@
 
 class BASE_EXPORT StatisticsRecorder {
  public:
-  typedef std::map<uint64_t, HistogramBase*> HistogramMap;  // Key is name-hash.
+  // A class used as a key for the histogram map below. It always references
+  // a string owned outside of this class, likely in the value of the map.
+  class StringKey : public StringPiece {
+   public:
+    // Constructs the StringKey using various sources. The source must live
+    // at least as long as the created object.
+    StringKey(const std::string& str) : StringPiece(str) {}
+    StringKey(StringPiece str) : StringPiece(str) {}
+
+    // Though StringPiece is better passed by value than by reference, in
+    // this case it's being passed many times and has likely already been
+    // stored in memory (not just registers), so the benefit of pass-by-value
+    // is negated.
+    bool operator<(const StringKey& rhs) const {
+      // Since order is unimportant in the map and string comparisons can be
+      // slow, use the length as the primary sort value.
+      if (length() < rhs.length())
+        return true;
+      if (length() > rhs.length())
+        return false;
+
+      // Fall back to an actual string comparison. The lengths are the same
+      // so a simple memory-compare is sufficient. This is slightly more
+      // efficient than calling operator<() for StringPiece which would
+      // again have to check lengths before calling wordmemcmp().
+      return wordmemcmp(data(), rhs.data(), length()) < 0;
+    }
+  };
+
+  typedef std::map<StringKey, HistogramBase*> HistogramMap;
   typedef std::vector<HistogramBase*> Histograms;
 
   // A class for iterating over the histograms held within this global resource.
@@ -135,15 +165,15 @@
   // Returns the number of known histograms.
   static size_t GetHistogramCount();
 
-  // Clears all of the known histograms and resets static variables to a
-  // state that allows a new initialization.
-  static void ResetForTesting();
-
   // Removes a histogram from the internal set of known ones. This can be
   // necessary during testing persistent histograms where the underlying
   // memory is being released.
   static void ForgetHistogramForTesting(base::StringPiece name);
 
+  // Reset any global instance of the statistics-recorder that was created
+  // by a call to Initialize().
+  static void UninitializeForTesting();
+
  private:
   // We keep a map of callbacks to histograms, so that as histograms are
   // created, we can set the callback properly.
@@ -165,11 +195,22 @@
   FRIEND_TEST_ALL_PREFIXES(HistogramDeltaSerializationTest,
                            DeserializeHistogramAndAddSamples);
 
+  // Imports histograms from global persistent memory. The global lock must
+  // not be held during this call.
+  static void ImportGlobalPersistentHistograms();
+
   // The constructor just initializes static members. Usually client code should
   // use Initialize to do this. But in test code, you can friend this class and
   // call the constructor to get a clean StatisticsRecorder.
   StatisticsRecorder();
 
+  // These are copies of everything that existed when the (test) Statistics-
+  // Recorder was created. The global ones have to be moved aside to create a
+  // clean environment.
+  std::unique_ptr<HistogramMap> existing_histograms_;
+  std::unique_ptr<CallbackMap> existing_callbacks_;
+  std::unique_ptr<RangesMap> existing_ranges_;
+
   static void Reset();
   static void DumpHistogramsToVlog(void* instance);
 
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index 073cbb1..813fbd1 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -2,45 +2,57 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/metrics/statistics_recorder.h"
+
 #include <stddef.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/bind.h"
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/sparse_histogram.h"
-#include "base/metrics/statistics_recorder.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 
-class StatisticsRecorderTest : public testing::Test {
+class StatisticsRecorderTest : public testing::TestWithParam<bool> {
  protected:
-  void SetUp() override {
+  const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
+
+  StatisticsRecorderTest() : use_persistent_histogram_allocator_(GetParam()) {
     // Get this first so it never gets created in persistent storage and will
     // not appear in the StatisticsRecorder after it is re-initialized.
     PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
     InitializeStatisticsRecorder();
+
+    // Use persistent memory for histograms if so indicated by test parameter.
+    if (use_persistent_histogram_allocator_) {
+      GlobalHistogramAllocator::CreateWithLocalMemory(
+          kAllocatorMemorySize, 0, "StatisticsRecorderTest");
+    }
   }
 
-  void TearDown() override {
+  ~StatisticsRecorderTest() override {
+    GlobalHistogramAllocator::ReleaseForTesting();
     UninitializeStatisticsRecorder();
-    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
   }
 
   void InitializeStatisticsRecorder() {
-    statistics_recorder_ = new StatisticsRecorder();
+    DCHECK(!statistics_recorder_);
+    StatisticsRecorder::UninitializeForTesting();
+    statistics_recorder_.reset(new StatisticsRecorder());
   }
 
   void UninitializeStatisticsRecorder() {
-    delete statistics_recorder_;
-    statistics_recorder_ = NULL;
+    statistics_recorder_.reset();
+    StatisticsRecorder::UninitializeForTesting();
   }
 
   Histogram* CreateHistogram(const std::string& name,
@@ -58,10 +70,27 @@
     delete histogram;
   }
 
-  StatisticsRecorder* statistics_recorder_;
+  int CountIterableHistograms(StatisticsRecorder::HistogramIterator* iter) {
+    int count = 0;
+    for (; *iter != StatisticsRecorder::end(); ++*iter) {
+      ++count;
+    }
+    return count;
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  std::unique_ptr<StatisticsRecorder> statistics_recorder_;
+  std::unique_ptr<GlobalHistogramAllocator> old_global_allocator_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorderTest);
 };
 
-TEST_F(StatisticsRecorderTest, NotInitialized) {
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(Allocator, StatisticsRecorderTest, testing::Bool());
+
+TEST_P(StatisticsRecorderTest, NotInitialized) {
   UninitializeStatisticsRecorder();
 
   ASSERT_FALSE(StatisticsRecorder::IsActive());
@@ -89,7 +118,7 @@
   EXPECT_EQ(0u, registered_ranges.size());
 }
 
-TEST_F(StatisticsRecorderTest, RegisterBucketRanges) {
+TEST_P(StatisticsRecorderTest, RegisterBucketRanges) {
   std::vector<const BucketRanges*> registered_ranges;
 
   BucketRanges* ranges1 = new BucketRanges(3);
@@ -127,7 +156,7 @@
   ASSERT_EQ(2u, registered_ranges.size());
 }
 
-TEST_F(StatisticsRecorderTest, RegisterHistogram) {
+TEST_P(StatisticsRecorderTest, RegisterHistogram) {
   // Create a Histogram that was not registered.
   Histogram* histogram = CreateHistogram("TestHistogram", 1, 1000, 10);
 
@@ -149,7 +178,7 @@
   EXPECT_EQ(1u, registered_histograms.size());
 }
 
-TEST_F(StatisticsRecorderTest, FindHistogram) {
+TEST_P(StatisticsRecorderTest, FindHistogram) {
   HistogramBase* histogram1 = Histogram::FactoryGet(
       "TestHistogram1", 1, 1000, 10, HistogramBase::kNoFlags);
   HistogramBase* histogram2 = Histogram::FactoryGet(
@@ -157,10 +186,33 @@
 
   EXPECT_EQ(histogram1, StatisticsRecorder::FindHistogram("TestHistogram1"));
   EXPECT_EQ(histogram2, StatisticsRecorder::FindHistogram("TestHistogram2"));
-  EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram") == NULL);
+  EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram"));
+
+  // Create a new global allocator using the same memory as the old one. Any
+  // old one is kept around so the memory doesn't get released.
+  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  if (use_persistent_histogram_allocator_) {
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        const_cast<void*>(old_global_allocator_->data()),
+        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
+        old_global_allocator_->Name());
+  }
+
+  // Reset statistics-recorder to validate operation from a clean start.
+  UninitializeStatisticsRecorder();
+  InitializeStatisticsRecorder();
+
+  if (use_persistent_histogram_allocator_) {
+    EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram1"));
+    EXPECT_TRUE(StatisticsRecorder::FindHistogram("TestHistogram2"));
+  } else {
+    EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram1"));
+    EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram2"));
+  }
+  EXPECT_FALSE(StatisticsRecorder::FindHistogram("TestHistogram"));
 }
 
-TEST_F(StatisticsRecorderTest, GetSnapshot) {
+TEST_P(StatisticsRecorderTest, GetSnapshot) {
   Histogram::FactoryGet("TestHistogram1", 1, 1000, 10, Histogram::kNoFlags);
   Histogram::FactoryGet("TestHistogram2", 1, 1000, 10, Histogram::kNoFlags);
   Histogram::FactoryGet("TestHistogram3", 1, 1000, 10, Histogram::kNoFlags);
@@ -178,7 +230,7 @@
   EXPECT_EQ(0u, snapshot.size());
 }
 
-TEST_F(StatisticsRecorderTest, RegisterHistogramWithFactoryGet) {
+TEST_P(StatisticsRecorderTest, RegisterHistogramWithFactoryGet) {
   StatisticsRecorder::Histograms registered_histograms;
 
   StatisticsRecorder::GetHistograms(&registered_histograms);
@@ -224,7 +276,14 @@
   EXPECT_EQ(4u, registered_histograms.size());
 }
 
-TEST_F(StatisticsRecorderTest, RegisterHistogramWithMacros) {
+TEST_P(StatisticsRecorderTest, RegisterHistogramWithMacros) {
+  // Macros cache pointers and so tests that use them can only be run once.
+  // Stop immediately if this test has run previously.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
   StatisticsRecorder::Histograms registered_histograms;
 
   HistogramBase* histogram = Histogram::FactoryGet(
@@ -245,7 +304,7 @@
   EXPECT_EQ(3u, registered_histograms.size());
 }
 
-TEST_F(StatisticsRecorderTest, BucketRangesSharing) {
+TEST_P(StatisticsRecorderTest, BucketRangesSharing) {
   std::vector<const BucketRanges*> ranges;
   StatisticsRecorder::GetBucketRanges(&ranges);
   EXPECT_EQ(0u, ranges.size());
@@ -263,16 +322,20 @@
   EXPECT_EQ(2u, ranges.size());
 }
 
-TEST_F(StatisticsRecorderTest, ToJSON) {
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram1", 30);
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram1", 40);
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram2", 30);
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram2", 40);
+TEST_P(StatisticsRecorderTest, ToJSON) {
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(30);
+  Histogram::FactoryGet("TestHistogram1", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(40);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(30);
+  Histogram::FactoryGet("TestHistogram2", 1, 1000, 50, HistogramBase::kNoFlags)
+      ->Add(40);
 
   std::string json(StatisticsRecorder::ToJSON(std::string()));
 
   // Check for valid JSON.
-  scoped_ptr<Value> root = JSONReader::Read(json);
+  std::unique_ptr<Value> root = JSONReader::Read(json);
   ASSERT_TRUE(root.get());
 
   DictionaryValue* root_dict = NULL;
@@ -322,21 +385,37 @@
   EXPECT_TRUE(json.empty());
 }
 
-TEST_F(StatisticsRecorderTest, IterationTest) {
-  StatisticsRecorder::Histograms registered_histograms;
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram.IterationTest1", 30);
-  PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
-      64 << 10 /* 64 KiB */, 0, "");
-  LOCAL_HISTOGRAM_COUNTS("TestHistogram.IterationTest2", 30);
+TEST_P(StatisticsRecorderTest, IterationTest) {
+  Histogram::FactoryGet("IterationTest1", 1, 64, 16, HistogramBase::kNoFlags);
+  Histogram::FactoryGet("IterationTest2", 1, 64, 16, HistogramBase::kNoFlags);
 
   StatisticsRecorder::HistogramIterator i1 = StatisticsRecorder::begin(true);
-  EXPECT_NE(StatisticsRecorder::end(), i1);
-  EXPECT_NE(StatisticsRecorder::end(), ++i1);
-  EXPECT_EQ(StatisticsRecorder::end(), ++i1);
+  EXPECT_EQ(2, CountIterableHistograms(&i1));
 
   StatisticsRecorder::HistogramIterator i2 = StatisticsRecorder::begin(false);
-  EXPECT_NE(StatisticsRecorder::end(), i2);
-  EXPECT_EQ(StatisticsRecorder::end(), ++i2);
+  EXPECT_EQ(use_persistent_histogram_allocator_ ? 0 : 2,
+            CountIterableHistograms(&i2));
+
+  // Create a new global allocator using the same memory as the old one. Any
+  // old one is kept around so the memory doesn't get released.
+  old_global_allocator_ = GlobalHistogramAllocator::ReleaseForTesting();
+  if (use_persistent_histogram_allocator_) {
+    GlobalHistogramAllocator::CreateWithPersistentMemory(
+        const_cast<void*>(old_global_allocator_->data()),
+        old_global_allocator_->length(), 0, old_global_allocator_->Id(),
+        old_global_allocator_->Name());
+  }
+
+  // Reset statistics-recorder to validate operation from a clean start.
+  UninitializeStatisticsRecorder();
+  InitializeStatisticsRecorder();
+
+  StatisticsRecorder::HistogramIterator i3 = StatisticsRecorder::begin(true);
+  EXPECT_EQ(use_persistent_histogram_allocator_ ? 2 : 0,
+            CountIterableHistograms(&i3));
+
+  StatisticsRecorder::HistogramIterator i4 = StatisticsRecorder::begin(false);
+  EXPECT_EQ(0, CountIterableHistograms(&i4));
 }
 
 namespace {
@@ -358,7 +437,7 @@
 }  // namespace
 
 // Check that you can't overwrite the callback with another.
-TEST_F(StatisticsRecorderTest, SetCallbackFailsWithoutHistogramTest) {
+TEST_P(StatisticsRecorderTest, SetCallbackFailsWithoutHistogramTest) {
   CallbackCheckWrapper callback_wrapper;
 
   bool result = base::StatisticsRecorder::SetCallback(
@@ -373,7 +452,7 @@
 }
 
 // Check that you can't overwrite the callback with another.
-TEST_F(StatisticsRecorderTest, SetCallbackFailsWithHistogramTest) {
+TEST_P(StatisticsRecorderTest, SetCallbackFailsWithHistogramTest) {
   HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
                                                    HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram);
@@ -400,7 +479,7 @@
 }
 
 // Check that you can't overwrite the callback with another.
-TEST_F(StatisticsRecorderTest, ClearCallbackSuceedsWithHistogramTest) {
+TEST_P(StatisticsRecorderTest, ClearCallbackSuceedsWithHistogramTest) {
   HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
                                                    HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram);
@@ -423,7 +502,7 @@
 }
 
 // Check that callback is used.
-TEST_F(StatisticsRecorderTest, CallbackUsedTest) {
+TEST_P(StatisticsRecorderTest, CallbackUsedTest) {
   {
     HistogramBase* histogram = Histogram::FactoryGet(
         "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
@@ -497,7 +576,7 @@
 }
 
 // Check that setting a callback before the histogram exists works.
-TEST_F(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
+TEST_P(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
   CallbackCheckWrapper callback_wrapper;
 
   base::StatisticsRecorder::SetCallback(
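The unit tests above switch from TEST_F to TEST_P so every case runs once per allocator mode. A self-contained illustration of that gtest pattern (hypothetical test, not from this patch):

#include "testing/gtest/include/gtest/gtest.h"

class AllocatorModeTest : public testing::TestWithParam<bool> {
 protected:
  bool use_persistent_allocator() const { return GetParam(); }
};

TEST_P(AllocatorModeTest, RunsInBothModes) {
  // GetParam() is false for the heap run and true for the persistent run.
  EXPECT_TRUE(use_persistent_allocator() || !use_persistent_allocator());
}

// Instantiates the suite twice, once per boolean parameter value.
INSTANTIATE_TEST_CASE_P(Allocator, AllocatorModeTest, testing::Bool());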
diff --git a/base/metrics/user_metrics.cc b/base/metrics/user_metrics.cc
index 55467e6..169a063 100644
--- a/base/metrics/user_metrics.cc
+++ b/base/metrics/user_metrics.cc
@@ -8,70 +8,67 @@
 
 #include <vector>
 
+#include "base/bind.h"
 #include "base/lazy_instance.h"
+#include "base/location.h"
 #include "base/macros.h"
 #include "base/threading/thread_checker.h"
 
 namespace base {
 namespace {
 
-// A helper class for tracking callbacks and ensuring thread-safety.
-class Callbacks {
- public:
-  Callbacks() {}
-
-  // Records the |action|.
-  void Record(const std::string& action) {
-    DCHECK(thread_checker_.CalledOnValidThread());
-    for (size_t i = 0; i < callbacks_.size(); ++i) {
-      callbacks_[i].Run(action);
-    }
-  }
-
-  // Adds |callback| to the list of |callbacks_|.
-  void AddCallback(const ActionCallback& callback) {
-    DCHECK(thread_checker_.CalledOnValidThread());
-    callbacks_.push_back(callback);
-  }
-
-  // Removes the first instance of |callback| from the list of |callbacks_|, if
-  // there is one.
-  void RemoveCallback(const ActionCallback& callback) {
-    DCHECK(thread_checker_.CalledOnValidThread());
-    for (size_t i = 0; i < callbacks_.size(); ++i) {
-      if (callbacks_[i].Equals(callback)) {
-        callbacks_.erase(callbacks_.begin() + i);
-        return;
-      }
-    }
-  }
-
- private:
-  base::ThreadChecker thread_checker_;
-  std::vector<ActionCallback> callbacks_;
-
-  DISALLOW_COPY_AND_ASSIGN(Callbacks);
-};
-
-base::LazyInstance<Callbacks> g_callbacks = LAZY_INSTANCE_INITIALIZER;
+LazyInstance<std::vector<ActionCallback>> g_callbacks =
+    LAZY_INSTANCE_INITIALIZER;
+LazyInstance<scoped_refptr<SingleThreadTaskRunner>> g_task_runner =
+    LAZY_INSTANCE_INITIALIZER;
 
 }  // namespace
 
 void RecordAction(const UserMetricsAction& action) {
-  g_callbacks.Get().Record(action.str_);
+  RecordComputedAction(action.str_);
 }
 
 void RecordComputedAction(const std::string& action) {
-  g_callbacks.Get().Record(action);
+  if (!g_task_runner.Get()) {
+    DCHECK(g_callbacks.Get().empty());
+    return;
+  }
+
+  if (!g_task_runner.Get()->BelongsToCurrentThread()) {
+    g_task_runner.Get()->PostTask(FROM_HERE,
+                                  Bind(&RecordComputedAction, action));
+    return;
+  }
+
+  for (const ActionCallback& callback : g_callbacks.Get()) {
+    callback.Run(action);
+  }
 }
 
 void AddActionCallback(const ActionCallback& callback) {
-  g_callbacks.Get().AddCallback(callback);
+  // Only allow adding a callback if the task runner is set.
+  DCHECK(g_task_runner.Get());
+  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
+  g_callbacks.Get().push_back(callback);
 }
 
 void RemoveActionCallback(const ActionCallback& callback) {
-  g_callbacks.Get().RemoveCallback(callback);
+  DCHECK(g_task_runner.Get());
+  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
+  std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
+  for (size_t i = 0; i < callbacks->size(); ++i) {
+    if ((*callbacks)[i].Equals(callback)) {
+      callbacks->erase(callbacks->begin() + i);
+      return;
+    }
+  }
+}
 
+void SetRecordActionTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
+  DCHECK(task_runner->BelongsToCurrentThread());
+  DCHECK(!g_task_runner.Get() || g_task_runner.Get()->BelongsToCurrentThread());
+  g_task_runner.Get() = task_runner;
 }
 
 }  // namespace base
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
index bcfefb8..c80bac0 100644
--- a/base/metrics/user_metrics.h
+++ b/base/metrics/user_metrics.h
@@ -10,6 +10,7 @@
 #include "base/base_export.h"
 #include "base/callback.h"
 #include "base/metrics/user_metrics_action.h"
+#include "base/single_thread_task_runner.h"
 
 namespace base {
 
@@ -17,7 +18,8 @@
 // the user metrics system.
 
 // Record that the user performed an action.
-// This method *must* be called from the main thread.
+// This function must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
 //
 // "Action" here means a user-generated event:
 //   good: "Reload", "CloseTab", and "IMEInvoked"
@@ -38,23 +40,31 @@
 // for review!
 //
 // For more complicated situations (like when there are many different
-// possible actions), see RecordComputedAction.
+// possible actions), see RecordComputedAction().
 BASE_EXPORT void RecordAction(const UserMetricsAction& action);
 
-// This function has identical input and behavior to RecordAction, but is
+// This function has identical input and behavior to RecordAction(), but is
 // not automatically found by the action-processing scripts.  It can be used
 // when it's a pain to enumerate all possible actions, but if you use this
 // you need to also update the rules for extracting known actions in
 // tools/metrics/actions/extract_actions.py.
+// This function must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
 BASE_EXPORT void RecordComputedAction(const std::string& action);
 
 // Called with the action string.
-typedef base::Callback<void(const std::string&)> ActionCallback;
+typedef Callback<void(const std::string&)> ActionCallback;
 
 // Add/remove action callbacks (see above).
+// These functions must be called after the task runner has been set with
+// SetRecordActionTaskRunner().
 BASE_EXPORT void AddActionCallback(const ActionCallback& callback);
 BASE_EXPORT void RemoveActionCallback(const ActionCallback& callback);
 
+// Set the task runner on which to record actions.
+BASE_EXPORT void SetRecordActionTaskRunner(
+    scoped_refptr<SingleThreadTaskRunner> task_runner);
+
 }  // namespace base
 
 #endif  // BASE_METRICS_USER_METRICS_H_
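Putting the new requirement together: a caller must install a task runner before recording or observing actions. A minimal sketch, assuming a message loop is already running on the calling thread so base::ThreadTaskRunnerHandle::Get() is valid (the helper functions here are illustrative, not part of this patch):

#include <string>

#include "base/bind.h"
#include "base/logging.h"
#include "base/metrics/user_metrics.h"
#include "base/threading/thread_task_runner_handle.h"

void LogAction(const std::string& action) {
  VLOG(1) << "User action: " << action;
}

void InitUserMetrics() {
  // Actions recorded on other threads are posted back to this task runner.
  base::SetRecordActionTaskRunner(base::ThreadTaskRunnerHandle::Get());
  base::AddActionCallback(base::Bind(&LogAction));
  base::RecordAction(base::UserMetricsAction("Reload"));
}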
diff --git a/base/numerics/safe_conversions.h b/base/numerics/safe_conversions.h
index baac188..6b558af 100644
--- a/base/numerics/safe_conversions.h
+++ b/base/numerics/safe_conversions.h
@@ -18,7 +18,7 @@
 // Convenience function that returns true if the supplied value is in range
 // for the destination type.
 template <typename Dst, typename Src>
-inline bool IsValueInRangeForNumericType(Src value) {
+constexpr bool IsValueInRangeForNumericType(Src value) {
   return internal::DstRangeRelationToSrcRange<Dst>(value) ==
          internal::RANGE_VALID;
 }
@@ -26,7 +26,7 @@
 // Convenience function for determining if a numeric value is negative without
 // throwing compiler warnings on: unsigned(value) < 0.
 template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+constexpr typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
 IsValueNegative(T value) {
   static_assert(std::numeric_limits<T>::is_specialized,
                 "Argument must be numeric.");
@@ -34,8 +34,8 @@
 }
 
 template <typename T>
-typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
-    IsValueNegative(T) {
+constexpr typename std::enable_if<!std::numeric_limits<T>::is_signed,
+                                  bool>::type IsValueNegative(T) {
   static_assert(std::numeric_limits<T>::is_specialized,
                 "Argument must be numeric.");
   return false;
@@ -62,11 +62,29 @@
 // HandleNaN will return 0 in this case.
 struct SaturatedCastNaNBehaviorReturnZero {
   template <typename T>
-  static T HandleNaN() {
+  static constexpr T HandleNaN() {
     return T();
   }
 };
 
+namespace internal {
+// This wrapper is used for C++11 constexpr support by avoiding the declaration
+// of local variables in the saturated_cast template function.
+template <typename Dst, class NaNHandler, typename Src>
+constexpr Dst saturated_cast_impl(const Src value,
+                                  const RangeConstraint constraint) {
+  return constraint == RANGE_VALID
+             ? static_cast<Dst>(value)
+             : (constraint == RANGE_UNDERFLOW
+                    ? std::numeric_limits<Dst>::min()
+                    : (constraint == RANGE_OVERFLOW
+                           ? std::numeric_limits<Dst>::max()
+                           : (constraint == RANGE_INVALID
+                                  ? NaNHandler::template HandleNaN<Dst>()
+                                  : (NOTREACHED(), static_cast<Dst>(value)))));
+}
+}  // namespace internal
+
 // saturated_cast<> is analogous to static_cast<> for numeric types, except
 // that the specified numeric conversion will saturate rather than overflow or
 // underflow. NaN assignment to an integral will defer the behavior to a
@@ -74,35 +92,18 @@
 template <typename Dst,
           class NaNHandler = SaturatedCastNaNBehaviorReturnZero,
           typename Src>
-inline Dst saturated_cast(Src value) {
-  // Optimization for floating point values, which already saturate.
-  if (std::numeric_limits<Dst>::is_iec559)
-    return static_cast<Dst>(value);
-
-  switch (internal::DstRangeRelationToSrcRange<Dst>(value)) {
-    case internal::RANGE_VALID:
-      return static_cast<Dst>(value);
-
-    case internal::RANGE_UNDERFLOW:
-      return std::numeric_limits<Dst>::min();
-
-    case internal::RANGE_OVERFLOW:
-      return std::numeric_limits<Dst>::max();
-
-    // Should fail only on attempting to assign NaN to a saturated integer.
-    case internal::RANGE_INVALID:
-      return NaNHandler::template HandleNaN<Dst>();
-  }
-
-  NOTREACHED();
-  return static_cast<Dst>(value);
+constexpr Dst saturated_cast(Src value) {
+  return std::numeric_limits<Dst>::is_iec559
+             ? static_cast<Dst>(value)  // Floating point optimization.
+             : internal::saturated_cast_impl<Dst, NaNHandler>(
+                   value, internal::DstRangeRelationToSrcRange<Dst>(value));
 }
 
 // strict_cast<> is analogous to static_cast<> for numeric types, except that
 // it will cause a compile failure if the destination type is not large enough
 // to contain any value in the source type. It performs no runtime checking.
 template <typename Dst, typename Src>
-inline Dst strict_cast(Src value) {
+constexpr Dst strict_cast(Src value) {
   static_assert(std::numeric_limits<Src>::is_specialized,
                 "Argument must be numeric.");
   static_assert(std::numeric_limits<Dst>::is_specialized,
@@ -128,33 +129,33 @@
 // compiles cleanly with truncation warnings enabled.
 // This template should introduce no runtime overhead, but it also provides no
 // runtime checking of any of the associated mathematical operations. Use
-// CheckedNumeric for runtime range checks of tha actual value being assigned.
+// CheckedNumeric for runtime range checks of the actual value being assigned.
 template <typename T>
 class StrictNumeric {
  public:
   typedef T type;
 
-  StrictNumeric() : value_(0) {}
+  constexpr StrictNumeric() : value_(0) {}
 
   // Copy constructor.
   template <typename Src>
-  StrictNumeric(const StrictNumeric<Src>& rhs)
+  constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
       : value_(strict_cast<T>(rhs.value_)) {}
 
   // This is not an explicit constructor because we implicitly upgrade regular
   // numerics to StrictNumerics to make them easier to use.
   template <typename Src>
-  StrictNumeric(Src value)
+  constexpr StrictNumeric(Src value)
       : value_(strict_cast<T>(value)) {}
 
   // The numeric cast operator basically handles all the magic.
   template <typename Dst>
-  operator Dst() const {
+  constexpr operator Dst() const {
     return strict_cast<Dst>(value_);
   }
 
  private:
-  T value_;
+  const T value_;
 };
 
 // Explicitly make a shorter size_t typedef for convenience.
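With the range checks, saturated_cast<>, and strict_cast<> now constexpr, saturation behavior can be verified at compile time. A small sketch of what that enables (hypothetical static_asserts, assuming the constexpr paths above compile as written):

#include <cstdint>

#include "base/numerics/safe_conversions.h"

static_assert(base::saturated_cast<uint8_t>(300) == 255,
              "values above the destination range saturate to max");
static_assert(base::saturated_cast<uint8_t>(-5) == 0,
              "values below the destination range saturate to min");
static_assert(!base::IsValueInRangeForNumericType<uint8_t>(300),
              "the range check is usable in constant expressions too");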
diff --git a/base/numerics/safe_conversions_impl.h b/base/numerics/safe_conversions_impl.h
index 03e7ee6..0f0aebc 100644
--- a/base/numerics/safe_conversions_impl.h
+++ b/base/numerics/safe_conversions_impl.h
@@ -8,6 +8,9 @@
 #include <limits.h>
 #include <stdint.h>
 
+#include <climits>
+#include <limits>
+
 namespace base {
 namespace internal {
 
@@ -16,9 +19,11 @@
 // for accurate range comparisons between floating point and integer types.
 template <typename NumericType>
 struct MaxExponent {
+  static_assert(std::is_arithmetic<NumericType>::value,
+                "Argument must be numeric.");
   static const int value = std::numeric_limits<NumericType>::is_iec559
                                ? std::numeric_limits<NumericType>::max_exponent
-                               : (sizeof(NumericType) * 8 + 1 -
+                               : (sizeof(NumericType) * CHAR_BIT + 1 -
                                   std::numeric_limits<NumericType>::is_signed);
 };
 
@@ -92,17 +97,18 @@
 };
 
 // Helper function for coercing an int back to a RangeConstraint.
-inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
-  DCHECK(integer_range_constraint >= RANGE_VALID &&
-         integer_range_constraint <= RANGE_INVALID);
+constexpr RangeConstraint GetRangeConstraint(int integer_range_constraint) {
+  // TODO(jschuh): Once we get full C++14 support we want this
+  // assert(integer_range_constraint >= RANGE_VALID &&
+  //        integer_range_constraint <= RANGE_INVALID)
   return static_cast<RangeConstraint>(integer_range_constraint);
 }
 
 // This function creates a RangeConstraint from an upper and lower bound
 // check by taking advantage of the fact that only NaN can be out of range in
 // both directions at once.
-inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
-                                   bool is_in_lower_bound) {
+constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
+                                                    bool is_in_lower_bound) {
   return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
                             (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
 }
@@ -132,25 +138,24 @@
 struct NarrowingRange {
   typedef typename std::numeric_limits<Src> SrcLimits;
   typedef typename std::numeric_limits<Dst> DstLimits;
+  // The following logic avoids warnings where the max function is
+  // instantiated with invalid values for a bit shift (even though
+  // such a function can never be called).
+  static const int shift = (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+                            SrcLimits::digits < DstLimits::digits &&
+                            SrcLimits::is_iec559 &&
+                            DstLimits::is_integer)
+                               ? (DstLimits::digits - SrcLimits::digits)
+                               : 0;
 
-  static Dst max() {
-    // The following logic avoids warnings where the max function is
-    // instantiated with invalid values for a bit shift (even though
-    // such a function can never be called).
-    static const int shift =
-        (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
-         SrcLimits::digits < DstLimits::digits && SrcLimits::is_iec559 &&
-         DstLimits::is_integer)
-            ? (DstLimits::digits - SrcLimits::digits)
-            : 0;
-
+  static constexpr Dst max() {
     // We use UINTMAX_C below to avoid compiler warnings about shifting floating
     // points. Since it's a compile time calculation, it shouldn't have any
     // performance impact.
     return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1);
   }
 
-  static Dst min() {
+  static constexpr Dst min() {
     return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max()
                                                : DstLimits::min();
   }
@@ -183,7 +188,7 @@
                                       DstSign,
                                       SrcSign,
                                       NUMERIC_RANGE_CONTAINED> {
-  static RangeConstraint Check(Src /* value */) { return RANGE_VALID; }
+  static constexpr RangeConstraint Check(Src /*value*/) { return RANGE_VALID; }
 };
 
 // Signed to signed narrowing: Both the upper and lower boundaries may be
@@ -194,7 +199,7 @@
                                       INTEGER_REPRESENTATION_SIGNED,
                                       INTEGER_REPRESENTATION_SIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static RangeConstraint Check(Src value) {
+  static constexpr RangeConstraint Check(Src value) {
     return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()),
                               (value >= NarrowingRange<Dst, Src>::min()));
   }
@@ -207,7 +212,7 @@
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static RangeConstraint Check(Src value) {
+  static constexpr RangeConstraint Check(Src value) {
     return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true);
   }
 };
@@ -219,7 +224,7 @@
                                       INTEGER_REPRESENTATION_SIGNED,
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static RangeConstraint Check(Src value) {
+  static constexpr RangeConstraint Check(Src value) {
     return sizeof(Dst) > sizeof(Src)
                ? RANGE_VALID
                : GetRangeConstraint(
@@ -236,7 +241,7 @@
                                       INTEGER_REPRESENTATION_UNSIGNED,
                                       INTEGER_REPRESENTATION_SIGNED,
                                       NUMERIC_RANGE_NOT_CONTAINED> {
-  static RangeConstraint Check(Src value) {
+  static constexpr RangeConstraint Check(Src value) {
     return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
                ? GetRangeConstraint(true, value >= static_cast<Src>(0))
                : GetRangeConstraint(
@@ -246,7 +251,7 @@
 };
 
 template <typename Dst, typename Src>
-inline RangeConstraint DstRangeRelationToSrcRange(Src value) {
+constexpr RangeConstraint DstRangeRelationToSrcRange(Src value) {
   static_assert(std::numeric_limits<Src>::is_specialized,
                 "Argument must be numeric.");
   static_assert(std::numeric_limits<Dst>::is_specialized,
diff --git a/base/numerics/safe_math.h b/base/numerics/safe_math.h
index 9757f1c..d0003b7 100644
--- a/base/numerics/safe_math.h
+++ b/base/numerics/safe_math.h
@@ -6,8 +6,11 @@
 #define BASE_NUMERICS_SAFE_MATH_H_
 
 #include <stddef.h>
+
+#include <limits>
 #include <type_traits>
 
+#include "base/logging.h"
 #include "base/numerics/safe_math_impl.h"
 
 namespace base {
@@ -45,6 +48,9 @@
 //     Do stuff...
 template <typename T>
 class CheckedNumeric {
+  static_assert(std::is_arithmetic<T>::value,
+                "CheckedNumeric<T>: T must be a numeric type.");
+
  public:
   typedef T type;
 
@@ -62,7 +68,7 @@
   // This is not an explicit constructor because we implicitly upgrade regular
   // numerics to CheckedNumerics to make them easier to use.
   template <typename Src>
-  CheckedNumeric(Src value)
+  CheckedNumeric(Src value)  // NOLINT(runtime/explicit)
       : state_(value) {
     static_assert(std::numeric_limits<Src>::is_specialized,
                   "Argument must be numeric.");
@@ -71,7 +77,7 @@
   // This is not an explicit constructor because we want a seamless conversion
   // from StrictNumeric types.
   template <typename Src>
-  CheckedNumeric(StrictNumeric<Src> value)
+  CheckedNumeric(StrictNumeric<Src> value)  // NOLINT(runtime/explicit)
       : state_(static_cast<Src>(value)) {
   }
 
@@ -232,10 +238,9 @@
           lhs.ValueUnsafe() OP rhs.ValueUnsafe(),                             \
           GetRangeConstraint(rhs.validity() | lhs.validity()));               \
     RangeConstraint validity = RANGE_VALID;                                   \
-    T result = static_cast<T>(Checked##NAME(                                  \
-        static_cast<Promotion>(lhs.ValueUnsafe()),                            \
-        static_cast<Promotion>(rhs.ValueUnsafe()),                            \
-        &validity));                                                          \
+    T result = static_cast<T>(                                                \
+        Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()),              \
+                      static_cast<Promotion>(rhs.ValueUnsafe()), &validity)); \
     return CheckedNumeric<Promotion>(                                         \
         result,                                                               \
         GetRangeConstraint(validity | lhs.validity() | rhs.validity()));      \
@@ -261,7 +266,9 @@
         OP CheckedNumeric<Promotion>::cast(rhs);                              \
   }                                                                           \
   /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
-  template <typename T, typename Src>                                         \
+  template <typename T, typename Src,                                         \
+            typename std::enable_if<std::is_arithmetic<Src>::value>::type* =  \
+                nullptr>                                                      \
   CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
       const CheckedNumeric<T>& lhs, Src rhs) {                                \
     typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
@@ -271,8 +278,10 @@
     return CheckedNumeric<Promotion>::cast(lhs)                               \
         OP CheckedNumeric<Promotion>::cast(rhs);                              \
   }                                                                           \
-  /* Binary arithmetic operator for right numeric and left CheckedNumeric. */ \
-  template <typename T, typename Src>                                         \
+  /* Binary arithmetic operator for left numeric and right CheckedNumeric. */ \
+  template <typename T, typename Src,                                         \
+            typename std::enable_if<std::is_arithmetic<Src>::value>::type* =  \
+                nullptr>                                                      \
   CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP(     \
       Src lhs, const CheckedNumeric<T>& rhs) {                                \
     typedef typename ArithmeticPromotion<T, Src>::type Promotion;             \
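CheckedNumeric carries a validity flag alongside the value; IsValid() and ValueUnsafe() are the accessors the updated unit tests below rely on. A brief usage sketch (hypothetical helper, not part of this patch):

#include <cstdint>
#include <limits>

#include "base/numerics/safe_math.h"

// Adds two int32_t values, saturating instead of overflowing.
int32_t ClampedSum(int32_t a, int32_t b) {
  base::CheckedNumeric<int32_t> sum = base::CheckedNumeric<int32_t>(a) + b;
  if (!sum.IsValid()) {
    // On overflow the stored value is meaningless, so clamp by sign instead.
    return b > 0 ? std::numeric_limits<int32_t>::max()
                 : std::numeric_limits<int32_t>::min();
  }
  return sum.ValueUnsafe();
}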
diff --git a/base/numerics/safe_math_impl.h b/base/numerics/safe_math_impl.h
index 487b3bc..f214f3f 100644
--- a/base/numerics/safe_math_impl.h
+++ b/base/numerics/safe_math_impl.h
@@ -8,6 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <climits>
 #include <cmath>
 #include <cstdlib>
 #include <limits>
@@ -89,7 +90,7 @@
 struct PositionOfSignBit {
   static const typename std::enable_if<std::numeric_limits<Integer>::is_integer,
                                        size_t>::type value =
-      8 * sizeof(Integer) - 1;
+      CHAR_BIT * sizeof(Integer) - 1;
 };
 
 // This is used for UnsignedAbs, where we need to support floating-point
@@ -114,7 +115,7 @@
 // Helper templates for integer manipulations.
 
 template <typename T>
-bool HasSignBit(T x) {
+constexpr bool HasSignBit(T x) {
   // Cast to unsigned since right shift on signed is undefined.
   return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
             PositionOfSignBit<T>::value);
@@ -122,8 +123,8 @@
 
 // This wrapper undoes the standard integer promotions.
 template <typename T>
-T BinaryComplement(T x) {
-  return ~x;
+constexpr T BinaryComplement(T x) {
+  return static_cast<T>(~x);
 }
 
 // Here are the actual portable checked integer math implementations.
@@ -138,15 +139,16 @@
   typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
   UnsignedDst ux = static_cast<UnsignedDst>(x);
   UnsignedDst uy = static_cast<UnsignedDst>(y);
-  UnsignedDst uresult = ux + uy;
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
   // Addition is valid if the sign of (x + y) is equal to either that of x or
   // that of y.
   if (std::numeric_limits<T>::is_signed) {
-    if (HasSignBit(BinaryComplement((uresult ^ ux) & (uresult ^ uy))))
+    if (HasSignBit(BinaryComplement(
+            static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))) {
       *validity = RANGE_VALID;
-    else  // Direction of wrap is inverse of result sign.
+    } else {  // Direction of wrap is inverse of result sign.
       *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
-
+    }
   } else {  // Unsigned is either valid or overflow.
     *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
   }
@@ -161,15 +163,16 @@
   typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
   UnsignedDst ux = static_cast<UnsignedDst>(x);
   UnsignedDst uy = static_cast<UnsignedDst>(y);
-  UnsignedDst uresult = ux - uy;
+  UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
   // Subtraction is valid if either x and y have same sign, or (x-y) and x have
   // the same sign.
   if (std::numeric_limits<T>::is_signed) {
-    if (HasSignBit(BinaryComplement((uresult ^ ux) & (ux ^ uy))))
+    if (HasSignBit(BinaryComplement(
+            static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))) {
       *validity = RANGE_VALID;
-    else  // Direction of wrap is inverse of result sign.
+    } else {  // Direction of wrap is inverse of result sign.
       *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
-
+    }
   } else {  // Unsigned is either valid or underflow.
     *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
   }
@@ -200,7 +203,8 @@
 CheckedMul(T x, T y, RangeConstraint* validity) {
   // If either side is zero then the result will be zero.
   if (!x || !y) {
-    return RANGE_VALID;
+    *validity = RANGE_VALID;
+    return static_cast<T>(0);
 
   } else if (x > 0) {
     if (y > 0)
@@ -219,7 +223,7 @@
           y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
   }
 
-  return x * y;
+  return static_cast<T>(x * y);
 }
 
 template <typename T>
@@ -231,7 +235,7 @@
   *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
                   ? RANGE_VALID
                   : RANGE_OVERFLOW;
-  return x * y;
+  return static_cast<T>(x * y);
 }
 
 // Division just requires a check for an invalid negation on signed min/-1.
@@ -248,7 +252,7 @@
   }
 
   *validity = RANGE_VALID;
-  return x / y;
+  return static_cast<T>(x / y);
 }
 
 template <typename T>
@@ -257,7 +261,7 @@
                         T>::type
 CheckedMod(T x, T y, RangeConstraint* validity) {
   *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
-  return x % y;
+  return static_cast<T>(x % y);
 }
 
 template <typename T>
@@ -266,7 +270,7 @@
                         T>::type
 CheckedMod(T x, T y, RangeConstraint* validity) {
   *validity = RANGE_VALID;
-  return x % y;
+  return static_cast<T>(x % y);
 }
 
 template <typename T>
@@ -277,7 +281,7 @@
   *validity =
       value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
   // The negation of signed min is min, so catch that one.
-  return -value;
+  return static_cast<T>(-value);
 }
 
 template <typename T>
@@ -328,7 +332,7 @@
                         T>::type
 CheckedUnsignedAbs(T value) {
   // T is unsigned, so |value| must already be positive.
-  return value;
+  return static_cast<T>(value);
 }
 
 // These are the floating point stubs that the compiler needs to see. Only the
@@ -338,7 +342,7 @@
   typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type \
       Checked##NAME(T, T, RangeConstraint*) {                         \
     NOTREACHED();                                                     \
-    return 0;                                                         \
+    return static_cast<T>(0);                                         \
   }
 
 BASE_FLOAT_ARITHMETIC_STUBS(Add)
@@ -353,14 +357,14 @@
 typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
     T value,
     RangeConstraint*) {
-  return -value;
+  return static_cast<T>(-value);
 }
 
 template <typename T>
 typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
     T value,
     RangeConstraint*) {
-  return std::abs(value);
+  return static_cast<T>(std::abs(value));
 }
 
 // Floats carry around their validity state with them, but integers do not. So,
@@ -390,7 +394,7 @@
 class CheckedNumericState<T, NUMERIC_INTEGER> {
  private:
   T value_;
-  RangeConstraint validity_;
+  RangeConstraint validity_ : CHAR_BIT;  // Actually requires only two bits.
 
  public:
   template <typename Src, NumericRepresentation type>
@@ -441,7 +445,7 @@
   template <typename Src>
   CheckedNumericState(
       Src value,
-      RangeConstraint /* validity */,
+      RangeConstraint /*validity*/,
       typename std::enable_if<std::numeric_limits<Src>::is_integer, int>::type =
           0) {
     switch (DstRangeRelationToSrcRange<T>(value)) {
@@ -485,27 +489,16 @@
   T value() const { return value_; }
 };
 
-// For integers less than 128-bit and floats 32-bit or larger, we can distil
-// C/C++ arithmetic promotions down to two simple rules:
-// 1. The type with the larger maximum exponent always takes precedence.
-// 2. The resulting type must be promoted to at least an int.
-// The following template specializations implement that promotion logic.
-enum ArithmeticPromotionCategory {
-  LEFT_PROMOTION,
-  RIGHT_PROMOTION,
-  DEFAULT_PROMOTION
-};
+// For integers less than 128-bit and floats 32-bit or larger, we have the type
+// with the larger maximum exponent take precedence.
+enum ArithmeticPromotionCategory { LEFT_PROMOTION, RIGHT_PROMOTION };
 
 template <typename Lhs,
           typename Rhs = Lhs,
           ArithmeticPromotionCategory Promotion =
               (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
-                  ? (MaxExponent<Lhs>::value > MaxExponent<int>::value
-                         ? LEFT_PROMOTION
-                         : DEFAULT_PROMOTION)
-                  : (MaxExponent<Rhs>::value > MaxExponent<int>::value
-                         ? RIGHT_PROMOTION
-                         : DEFAULT_PROMOTION) >
+                  ? LEFT_PROMOTION
+                  : RIGHT_PROMOTION>
 struct ArithmeticPromotion;
 
 template <typename Lhs, typename Rhs>
@@ -518,11 +511,6 @@
   typedef Rhs type;
 };
 
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<Lhs, Rhs, DEFAULT_PROMOTION> {
-  typedef int type;
-};
-
 // We can statically check if operations on the provided types can wrap, so we
 // can skip the checked operations if they're not needed. So, for an integer we
 // care if the destination type preserves the sign and is twice the width of
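The static_cast<T>() additions above account for C++ integer promotion: for operand types narrower than int, operators such as %, / and unary minus yield an int, so handing the result back as T needs an explicit narrowing cast. A minimal sketch of the same pattern (illustrative only, not part of the patch):

    #include <cstdint>

    int8_t Mod(int8_t x, int8_t y) {
      // x % y is computed in int after promotion; the cast narrows it back
      // to the declared return type, matching CheckedMod() above.
      return static_cast<int8_t>(x % y);
    }

The ArithmeticPromotion change drops DEFAULT_PROMOTION entirely: the promoted type is now simply whichever operand has the larger maximum exponent, with no forced promotion to int.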
diff --git a/base/numerics/safe_numerics_unittest.cc b/base/numerics/safe_numerics_unittest.cc
index 861f515..4be7ab5 100644
--- a/base/numerics/safe_numerics_unittest.cc
+++ b/base/numerics/safe_numerics_unittest.cc
@@ -63,10 +63,13 @@
 
 // Helper macros to wrap displaying the conversion types and line numbers.
 #define TEST_EXPECTED_VALIDITY(expected, actual)                           \
-  EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).validity())              \
+  EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).IsValid())               \
       << "Result test: Value " << +(actual).ValueUnsafe() << " as " << dst \
       << " on line " << line;
 
+#define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
+#define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
+
 #define TEST_EXPECTED_VALUE(expected, actual)                                \
   EXPECT_EQ(static_cast<Dst>(expected),                                      \
             CheckedNumeric<Dst>(actual).ValueUnsafe())                       \
@@ -82,43 +85,32 @@
                                 numeric_limits<Dst>::is_signed,
                             int>::type = 0) {
   typedef numeric_limits<Dst> DstLimits;
-  TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
-                         -CheckedNumeric<Dst>(DstLimits::min()));
-  TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()).Abs());
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
 
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::max()) + -1);
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) + -1);
-  TEST_EXPECTED_VALIDITY(
-      RANGE_UNDERFLOW,
-      CheckedNumeric<Dst>(-DstLimits::max()) + -DstLimits::max());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+                        -DstLimits::max());
 
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) - 1);
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()) - -1);
-  TEST_EXPECTED_VALIDITY(
-      RANGE_OVERFLOW,
-      CheckedNumeric<Dst>(DstLimits::max()) - -DstLimits::max());
-  TEST_EXPECTED_VALIDITY(
-      RANGE_UNDERFLOW,
-      CheckedNumeric<Dst>(-DstLimits::max()) - DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) - -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+                        -DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+                        DstLimits::max());
 
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) * 2);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * 2);
 
-  TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) / -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) / -1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
 
   // Modulus is legal only for integers.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
   TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
-  TEST_EXPECTED_VALIDITY(RANGE_INVALID, CheckedNumeric<Dst>(-1) % -2);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-1) % -2);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
   // Test all the different modulus combinations.
@@ -138,19 +130,14 @@
                                 !numeric_limits<Dst>::is_signed,
                             int>::type = 0) {
   typedef numeric_limits<Dst> DstLimits;
-  TEST_EXPECTED_VALIDITY(RANGE_VALID, -CheckedNumeric<Dst>(DstLimits::min()));
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()).Abs());
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) + -1);
-  TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW,
-                         CheckedNumeric<Dst>(DstLimits::min()) - 1);
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) * 2);
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
-  TEST_EXPECTED_VALIDITY(
-      RANGE_VALID,
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
+  TEST_EXPECTED_SUCCESS(
       CheckedNumeric<typename SignedIntegerForSize<Dst>::type>(
           std::numeric_limits<typename SignedIntegerForSize<Dst>::type>::min())
           .UnsignedAbs());
@@ -176,29 +163,22 @@
     int line,
     typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
   typedef numeric_limits<Dst> DstLimits;
-  TEST_EXPECTED_VALIDITY(RANGE_VALID, -CheckedNumeric<Dst>(DstLimits::min()));
+  TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
 
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()).Abs());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
   TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
 
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()) + -1);
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::max()) + 1);
-  TEST_EXPECTED_VALIDITY(
-      RANGE_UNDERFLOW,
-      CheckedNumeric<Dst>(-DstLimits::max()) + -DstLimits::max());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
+                        -DstLimits::max());
 
-  TEST_EXPECTED_VALIDITY(
-      RANGE_OVERFLOW,
-      CheckedNumeric<Dst>(DstLimits::max()) - -DstLimits::max());
-  TEST_EXPECTED_VALIDITY(
-      RANGE_UNDERFLOW,
-      CheckedNumeric<Dst>(-DstLimits::max()) - DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
+                        -DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+                        DstLimits::max());
 
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()) * 2);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) * 2);
 
   TEST_EXPECTED_VALUE(-0.5, CheckedNumeric<Dst>(-1.0) / 2);
   EXPECT_EQ(static_cast<Dst>(1.0), CheckedNumeric<Dst>(1.0).ValueFloating());
@@ -258,17 +238,15 @@
   TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>() + 1));
   TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::min()) + 1);
-  TEST_EXPECTED_VALIDITY(
-      RANGE_OVERFLOW, CheckedNumeric<Dst>(DstLimits::max()) + DstLimits::max());
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + 1);
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
+                        DstLimits::max());
 
   // Generic subtraction.
   TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(1) - 1));
   TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
-  TEST_EXPECTED_VALIDITY(RANGE_VALID,
-                         CheckedNumeric<Dst>(DstLimits::max()) - 1);
+  TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) - 1);
 
   // Generic multiplication.
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>() * 1));
@@ -277,8 +255,8 @@
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * 0));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
   TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
-  TEST_EXPECTED_VALIDITY(
-      RANGE_OVERFLOW, CheckedNumeric<Dst>(DstLimits::max()) * DstLimits::max());
+  TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+                        DstLimits::max());
 
   // Generic division.
   TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
@@ -349,18 +327,18 @@
                   "Comparison must be sign preserving and value preserving");
 
     const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
-    TEST_EXPECTED_VALIDITY(RANGE_VALID, checked_dst);
+    TEST_EXPECTED_SUCCESS(checked_dst);
     if (MaxExponent<Dst>::value > MaxExponent<Src>::value) {
       if (MaxExponent<Dst>::value >= MaxExponent<Src>::value * 2 - 1) {
         // At least twice larger type.
-        TEST_EXPECTED_VALIDITY(RANGE_VALID, SrcLimits::max() * checked_dst);
+        TEST_EXPECTED_SUCCESS(SrcLimits::max() * checked_dst);
 
       } else {  // Larger, but not at least twice as large.
-        TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, SrcLimits::max() * checked_dst);
-        TEST_EXPECTED_VALIDITY(RANGE_VALID, checked_dst + 1);
+        TEST_EXPECTED_FAILURE(SrcLimits::max() * checked_dst);
+        TEST_EXPECTED_SUCCESS(checked_dst + 1);
       }
     } else {  // Same width type.
-      TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + 1);
+      TEST_EXPECTED_FAILURE(checked_dst + 1);
     }
 
     TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
@@ -389,9 +367,9 @@
                   "Destination must be narrower than source");
 
     const CheckedNumeric<Dst> checked_dst;
-    TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst - SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst - SrcLimits::max());
 
     TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
     TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
@@ -418,7 +396,7 @@
       TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
       TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
     } else {
-      TEST_EXPECTED_VALIDITY(RANGE_INVALID, checked_dst - static_cast<Src>(1));
+      TEST_EXPECTED_FAILURE(checked_dst - static_cast<Src>(1));
       TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
     }
   }
@@ -436,8 +414,8 @@
 
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + static_cast<Src>(-1));
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + -SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
 
     TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
     TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
@@ -459,9 +437,9 @@
 
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
-    TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + static_cast<Src>(-1));
-    TEST_EXPECTED_VALIDITY(RANGE_UNDERFLOW, checked_dst + -SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
+    TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
 
     TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
     TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
@@ -501,7 +479,7 @@
 
     const CheckedNumeric<Dst> checked_dst;
     TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
-    TEST_EXPECTED_VALIDITY(RANGE_OVERFLOW, checked_dst + SrcLimits::max());
+    TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
     TEST_EXPECTED_VALUE(SrcLimits::min(), checked_dst + SrcLimits::min());
 
     TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
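The test updates track an API change in CheckedNumeric: expectations are now phrased through IsValid() rather than by comparing against specific RangeConstraint values, so a test only distinguishes success from failure. A minimal sketch (illustrative only):

    base::CheckedNumeric<int8_t> value = 100;
    value += 100;                   // Overflows int8_t.
    EXPECT_FALSE(value.IsValid());  // Previously asserted RANGE_OVERFLOW.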
diff --git a/base/observer_list_threadsafe.h b/base/observer_list_threadsafe.h
index 6154ae9..6821795 100644
--- a/base/observer_list_threadsafe.h
+++ b/base/observer_list_threadsafe.h
@@ -17,8 +17,8 @@
 #include "base/observer_list.h"
 #include "base/single_thread_task_runner.h"
 #include "base/stl_util.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 ///////////////////////////////////////////////////////////////////////////////
 //
diff --git a/base/pickle.cc b/base/pickle.cc
index 016e934..4ef167b 100644
--- a/base/pickle.cc
+++ b/base/pickle.cc
@@ -231,6 +231,12 @@
   payload_size_ += bits::Align(length, sizeof(uint32_t));
 }
 
+void PickleSizer::AddAttachment() {
+  // From IPC::Message::WriteAttachment
+  AddBool();
+  AddInt();
+}
+
 template <size_t length> void PickleSizer::AddBytesStatic() {
   DCHECK_LE(length, static_cast<size_t>(std::numeric_limits<int>::max()));
   AddBytes(length);
@@ -358,12 +364,12 @@
     Resize(capacity_after_header_ * 2 + new_size);
 }
 
-bool Pickle::WriteAttachment(scoped_refptr<Attachment> /* attachment */) {
+bool Pickle::WriteAttachment(scoped_refptr<Attachment> /*attachment*/) {
   return false;
 }
 
-bool Pickle::ReadAttachment(base::PickleIterator* /* iter */,
-                            scoped_refptr<Attachment>* /* attachment */) const {
+bool Pickle::ReadAttachment(base::PickleIterator* /*iter*/,
+                            scoped_refptr<Attachment>* /*attachment*/) const {
   return false;
 }
 
diff --git a/base/pickle.h b/base/pickle.h
index eb4888a..40f5d26 100644
--- a/base/pickle.h
+++ b/base/pickle.h
@@ -132,6 +132,7 @@
   void AddString16(const StringPiece16& value);
   void AddData(int length);
   void AddBytes(int length);
+  void AddAttachment();
 
  private:
   // Just like AddBytes() but with a compile-time size for performance.
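PickleSizer lets callers compute a payload size up front without building the Pickle; AddAttachment() mirrors the bool-plus-int encoding noted in the pickle.cc change above (IPC::Message::WriteAttachment). A minimal sketch (illustrative only; |name| is a placeholder StringPiece):

    base::PickleSizer sizer;
    sizer.AddInt();            // Pairs with a later WriteInt().
    sizer.AddString(name);     // Pairs with a later WriteString().
    sizer.AddAttachment();     // bool + int, per IPC::Message::WriteAttachment.
    size_t expected_payload = sizer.payload_size();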
diff --git a/base/pickle_unittest.cc b/base/pickle_unittest.cc
index 307cb51..e00edd9 100644
--- a/base/pickle_unittest.cc
+++ b/base/pickle_unittest.cc
@@ -2,15 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/pickle.h"
+
 #include <limits.h>
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/pickle.h"
 #include "base/strings/string16.h"
 #include "base/strings/utf_string_conversions.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -163,7 +164,7 @@
 
 // Tests that we can handle really small buffers.
 TEST(PickleTest, SmallBuffer) {
-  scoped_ptr<char[]> buffer(new char[1]);
+  std::unique_ptr<char[]> buffer(new char[1]);
 
   // We should not touch the buffer.
   Pickle pickle(buffer.get(), 1);
@@ -329,7 +330,7 @@
 
 TEST(PickleTest, FindNextWithIncompleteHeader) {
   size_t header_size = sizeof(Pickle::Header);
-  scoped_ptr<char[]> buffer(new char[header_size - 1]);
+  std::unique_ptr<char[]> buffer(new char[header_size - 1]);
   memset(buffer.get(), 0x1, header_size - 1);
 
   const char* start = buffer.get();
@@ -346,7 +347,7 @@
   size_t header_size = sizeof(Pickle::Header);
   size_t header_size2 = 2 * header_size;
   size_t payload_received = 100;
-  scoped_ptr<char[]> buffer(new char[header_size2 + payload_received]);
+  std::unique_ptr<char[]> buffer(new char[header_size2 + payload_received]);
   const char* start = buffer.get();
   Pickle::Header* header = reinterpret_cast<Pickle::Header*>(buffer.get());
   const char* end = start + header_size2 + payload_received;
@@ -390,7 +391,7 @@
 
 TEST(PickleTest, Resize) {
   size_t unit = Pickle::kPayloadUnit;
-  scoped_ptr<char[]> data(new char[unit]);
+  std::unique_ptr<char[]> data(new char[unit]);
   char* data_ptr = data.get();
   for (size_t i = 0; i < unit; i++)
     data_ptr[i] = 'G';
diff --git a/base/posix/global_descriptors.h b/base/posix/global_descriptors.h
index c774634..1761e25 100644
--- a/base/posix/global_descriptors.h
+++ b/base/posix/global_descriptors.h
@@ -55,7 +55,9 @@
 #if !defined(OS_ANDROID)
   static const int kBaseDescriptor = 3;  // 0, 1, 2 are already taken.
 #else
-  static const int kBaseDescriptor = 4;  // 3 used by __android_log_write().
+  // 3 used by __android_log_write().
+  // 4 used by... something important on Android M.
+  static const int kBaseDescriptor = 5;
 #endif
 
   // Return the singleton instance of GlobalDescriptors.
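With the base bumped to 5, descriptors registered through GlobalDescriptors start at fd 5 on Android. A minimal usage sketch (illustrative only; kMyDescriptorKey is a hypothetical key):

    const uint32_t kMyDescriptorKey = 0;
    base::GlobalDescriptors::GetInstance()->Set(
        kMyDescriptorKey, base::GlobalDescriptors::kBaseDescriptor + 0);
    int fd = base::GlobalDescriptors::GetInstance()->Get(kMyDescriptorKey);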
diff --git a/base/power_monitor/power_monitor.h b/base/power_monitor/power_monitor.h
index 683eeb9..e025b32 100644
--- a/base/power_monitor/power_monitor.h
+++ b/base/power_monitor/power_monitor.h
@@ -20,7 +20,7 @@
 class BASE_EXPORT PowerMonitor {
  public:
   // Takes ownership of |source|.
-  explicit PowerMonitor(scoped_ptr<PowerMonitorSource> source);
+  explicit PowerMonitor(std::unique_ptr<PowerMonitorSource> source);
   ~PowerMonitor();
 
   // Get the process-wide PowerMonitor (if not present, returns NULL).
@@ -45,7 +45,7 @@
   void NotifyResume();
 
   scoped_refptr<ObserverListThreadSafe<PowerObserver> > observers_;
-  scoped_ptr<PowerMonitorSource> source_;
+  std::unique_ptr<PowerMonitorSource> source_;
 
   DISALLOW_COPY_AND_ASSIGN(PowerMonitor);
 };
diff --git a/base/process/launch.h b/base/process/launch.h
index b1811d4..adfa093 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -65,6 +65,9 @@
   // If true, wait for the process to complete.
   bool wait;
 
+  // If not empty, change to this directory before executing the new process.
+  base::FilePath current_directory;
+
 #if defined(OS_WIN)
   bool start_hidden;
 
@@ -151,9 +154,6 @@
 #endif  // defined(OS_LINUX)
 
 #if defined(OS_POSIX)
-  // If not empty, change to this directory before execing the new process.
-  base::FilePath current_directory;
-
   // If non-null, a delegate to be run immediately prior to executing the new
   // program in the child process.
   //
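Moving current_directory out of the OS_POSIX block makes it part of the cross-platform LaunchOptions surface. A minimal sketch (illustrative only; paths are placeholders):

    base::LaunchOptions options;
    options.current_directory = base::FilePath(FILE_PATH_LITERAL("/tmp"));
    options.wait = true;
    base::Process child = base::LaunchProcess(
        base::CommandLine(base::FilePath(FILE_PATH_LITERAL("/bin/ls"))),
        options);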
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index f1318ca..af14c91 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -22,6 +22,7 @@
 
 #include <iterator>
 #include <limits>
+#include <memory>
 #include <set>
 
 #include "base/command_line.h"
@@ -32,16 +33,16 @@
 #include "base/files/file_util.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/process.h"
 #include "base/process/process_metrics.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/waitable_event.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_restrictions.h"
 #include "build/build_config.h"
-#include "third_party/valgrind/valgrind.h"
 
 #if defined(OS_LINUX)
 #include <sys/prctl.h>
@@ -155,8 +156,12 @@
 // See crbug.com/177956.
 void ResetChildSignalHandlersToDefaults(void) {
   for (int signum = 1; ; ++signum) {
+#if defined(ANDROID)
     struct kernel_sigaction act;
     memset(&act, 0, sizeof(act));
+#else
+    struct kernel_sigaction act = {0};
+#endif
     int sigaction_get_ret = sys_rt_sigaction(signum, NULL, &act);
     if (sigaction_get_ret && errno == EINVAL) {
 #if !defined(NDEBUG)
@@ -202,7 +207,7 @@
 };
 
 // Automatically closes |DIR*|s.
-typedef scoped_ptr<DIR, ScopedDIRClose> ScopedDIR;
+typedef std::unique_ptr<DIR, ScopedDIRClose> ScopedDIR;
 
 #if defined(OS_LINUX)
 static const char kFDDir[] = "/proc/self/fd";
@@ -301,13 +306,13 @@
   fd_shuffle1.reserve(fd_shuffle_size);
   fd_shuffle2.reserve(fd_shuffle_size);
 
-  scoped_ptr<char* []> argv_cstr(new char* [argv.size() + 1]);
+  std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
   for (size_t i = 0; i < argv.size(); i++) {
     argv_cstr[i] = const_cast<char*>(argv[i].c_str());
   }
   argv_cstr[argv.size()] = NULL;
 
-  scoped_ptr<char*[]> new_environ;
+  std::unique_ptr<char* []> new_environ;
   char* const empty_environ = NULL;
   char* const* old_environ = GetEnvironment();
   if (options.clear_environ)
@@ -552,7 +557,7 @@
   int pipe_fd[2];
   pid_t pid;
   InjectiveMultimap fd_shuffle1, fd_shuffle2;
-  scoped_ptr<char*[]> argv_cstr(new char*[argv.size() + 1]);
+  std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
 
   fd_shuffle1.reserve(3);
   fd_shuffle2.reserve(3);
@@ -738,7 +743,7 @@
   // fork-like behavior.
   char stack_buf[PTHREAD_STACK_MIN] ALIGNAS(16);
 #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
-    defined(ARCH_CPU_MIPS64_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
+    defined(ARCH_CPU_MIPS_FAMILY)
   // The stack grows downward.
   void* stack = stack_buf + sizeof(stack_buf);
 #else
@@ -772,7 +777,7 @@
 #if defined(ARCH_CPU_X86_64)
     return syscall(__NR_clone, flags, nullptr, ptid, ctid, nullptr);
 #elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
-    defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_MIPS64_FAMILY)
+    defined(ARCH_CPU_MIPS_FAMILY)
     // CONFIG_CLONE_BACKWARDS defined.
     return syscall(__NR_clone, flags, nullptr, ptid, nullptr, ctid);
 #else
diff --git a/base/process/process_metrics.cc b/base/process/process_metrics.cc
index a21891d..0b38726 100644
--- a/base/process/process_metrics.cc
+++ b/base/process/process_metrics.cc
@@ -31,8 +31,8 @@
   return system_metrics;
 }
 
-scoped_ptr<Value> SystemMetrics::ToValue() const {
-  scoped_ptr<DictionaryValue> res(new DictionaryValue());
+std::unique_ptr<Value> SystemMetrics::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   res->SetInteger("committed_memory", static_cast<int>(committed_memory_));
 #if defined(OS_LINUX) || defined(OS_ANDROID)
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 0d4d04a..8d4e51b 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -236,7 +236,10 @@
 // Returns 0 if it can't compute the commit charge.
 BASE_EXPORT size_t GetSystemCommitCharge();
 
-// Returns the number of bytes in a memory page.
+// Returns the number of bytes in a memory page. Do not use this to compute
+// the number of pages in a block of memory for calling mincore(). On some
+// platforms, e.g. iOS, mincore() uses a different page size from what is
+// returned by GetPageSize().
 BASE_EXPORT size_t GetPageSize();
 
 #if defined(OS_POSIX)
@@ -264,7 +267,7 @@
   SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
 
   // Serializes the platform specific fields to value.
-  scoped_ptr<Value> ToValue() const;
+  std::unique_ptr<Value> ToValue() const;
 
   int total;
   int free;
@@ -340,7 +343,7 @@
   SystemDiskInfo(const SystemDiskInfo& other);
 
   // Serializes the platform specific fields to value.
-  scoped_ptr<Value> ToValue() const;
+  std::unique_ptr<Value> ToValue() const;
 
   uint64_t reads;
   uint64_t reads_merged;
@@ -377,7 +380,7 @@
   }
 
   // Serializes the platform specific fields to value.
-  scoped_ptr<Value> ToValue() const;
+  std::unique_ptr<Value> ToValue() const;
 
   uint64_t num_reads;
   uint64_t num_writes;
@@ -401,7 +404,7 @@
   static SystemMetrics Sample();
 
   // Serializes the system metrics to value.
-  scoped_ptr<Value> ToValue() const;
+  std::unique_ptr<Value> ToValue() const;
 
  private:
   FRIEND_TEST_ALL_PREFIXES(SystemMetricsTest, SystemMetrics);
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index c6aff3e..89a2609 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -559,8 +559,8 @@
 SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
     default;
 
-scoped_ptr<Value> SystemMemoryInfoKB::ToValue() const {
-  scoped_ptr<DictionaryValue> res(new DictionaryValue());
+std::unique_ptr<Value> SystemMemoryInfoKB::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   res->SetInteger("total", total);
   res->SetInteger("free", free);
@@ -772,8 +772,8 @@
 
 SystemDiskInfo::SystemDiskInfo(const SystemDiskInfo& other) = default;
 
-scoped_ptr<Value> SystemDiskInfo::ToValue() const {
-  scoped_ptr<DictionaryValue> res(new DictionaryValue());
+std::unique_ptr<Value> SystemDiskInfo::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   // Write out uint64_t variables as doubles.
   // Note: this may discard some precision, but for JS there's no other option.
@@ -898,8 +898,8 @@
 }
 
 #if defined(OS_CHROMEOS)
-scoped_ptr<Value> SwapInfo::ToValue() const {
-  scoped_ptr<DictionaryValue> res(new DictionaryValue());
+std::unique_ptr<Value> SwapInfo::ToValue() const {
+  std::unique_ptr<DictionaryValue> res(new DictionaryValue());
 
   // Write out uint64_t variables as doubles.
   // Note: this may discard some precision, but for JS there's no other option.
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index 73e693e..94a2ffe 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -17,7 +17,6 @@
 #include "base/files/scoped_temp_dir.h"
 #include "base/macros.h"
 #include "base/strings/string_number_conversions.h"
-#include "base/strings/stringprintf.h"
 #include "base/test/multiprocess_test.h"
 #include "base/threading/thread.h"
 #include "build/build_config.h"
@@ -304,7 +303,7 @@
 // calls to it.
 TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
   ProcessHandle handle = GetCurrentProcessHandle();
-  scoped_ptr<ProcessMetrics> metrics(
+  std::unique_ptr<ProcessMetrics> metrics(
       ProcessMetrics::CreateProcessMetrics(handle));
 
   EXPECT_GE(metrics->GetCPUUsage(), 0.0);
@@ -425,7 +424,7 @@
   ASSERT_GT(initial_threads, 0);
   const int kNumAdditionalThreads = 10;
   {
-    scoped_ptr<Thread> my_threads[kNumAdditionalThreads];
+    std::unique_ptr<Thread> my_threads[kNumAdditionalThreads];
     for (int i = 0; i < kNumAdditionalThreads; ++i) {
       my_threads[i].reset(new Thread("GetNumberOfThreadsTest"));
       my_threads[i]->Start();
@@ -497,7 +496,7 @@
   ASSERT_TRUE(child.IsValid());
   WaitForEvent(temp_path, kSignalClosed);
 
-  scoped_ptr<ProcessMetrics> metrics(
+  std::unique_ptr<ProcessMetrics> metrics(
       ProcessMetrics::CreateProcessMetrics(child.Handle()));
   // Try a couple times to observe the child with 0 fds open.
   // Sometimes we've seen that the child can have 1 remaining
diff --git a/base/process/process_posix.cc b/base/process/process_posix.cc
index 248fc80..ba9b544 100644
--- a/base/process/process_posix.cc
+++ b/base/process/process_posix.cc
@@ -13,6 +13,7 @@
 #include "base/logging.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/kill.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "build/build_config.h"
 
 #if defined(OS_MACOSX)
@@ -100,8 +101,12 @@
     return false;
   }
 
+#if defined(ANDROID)
   struct kevent change;
   memset(&change, 0, sizeof(change));
+#else
+  struct kevent change = {0};
+#endif
   EV_SET(&change, handle, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
   int result = HANDLE_EINTR(kevent(kq.get(), &change, 1, NULL, 0, NULL));
   if (result == -1) {
@@ -125,8 +130,12 @@
   }
 
   result = -1;
+#if defined(ANDROID)
   struct kevent event;
   memset(&event, 0, sizeof(event));
+#else
+  struct kevent event = {0};
+#endif
 
   while (wait_forever || remaining_delta > base::TimeDelta()) {
     struct timespec remaining_timespec;
@@ -296,7 +305,7 @@
 }
 
 #if !defined(OS_NACL_NONSFI)
-bool Process::Terminate(int /* exit_code */, bool wait) const {
+bool Process::Terminate(int /*exit_code*/, bool wait) const {
   // exit_code isn't supportable.
   DCHECK(IsValid());
   CHECK_GT(process_, 0);
diff --git a/base/rand_util_unittest.cc b/base/rand_util_unittest.cc
index ea803ee..4f46b80 100644
--- a/base/rand_util_unittest.cc
+++ b/base/rand_util_unittest.cc
@@ -9,9 +9,9 @@
 
 #include <algorithm>
 #include <limits>
+#include <memory>
 
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -143,7 +143,7 @@
   const int kTestIterations = 10;
   const size_t kTestBufferSize = 1 * 1024 * 1024;
 
-  scoped_ptr<uint8_t[]> buffer(new uint8_t[kTestBufferSize]);
+  std::unique_ptr<uint8_t[]> buffer(new uint8_t[kTestBufferSize]);
   const base::TimeTicks now = base::TimeTicks::Now();
   for (int i = 0; i < kTestIterations; ++i)
     base::RandBytes(buffer.get(), kTestBufferSize);
diff --git a/base/run_loop.cc b/base/run_loop.cc
index af2c568..4e425c9 100644
--- a/base/run_loop.cc
+++ b/base/run_loop.cc
@@ -68,6 +68,9 @@
   run_depth_ = previous_run_loop_? previous_run_loop_->run_depth_ + 1 : 1;
   loop_->run_loop_ = this;
 
+  if (run_depth_ > 1)
+    loop_->NotifyBeginNestedLoop();
+
   running_ = true;
   return true;
 }
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index a82bc91..af9d2bf 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -12,10 +12,11 @@
 
 #include <algorithm>
 #include <limits>
+#include <memory>
 
 #include "base/files/file_util.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/free_deleter.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -86,7 +87,7 @@
   }
 }
 
-#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
+#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_LINUX)
 #define MAYBE_NewOverflow DISABLED_NewOverflow
 #else
 #define MAYBE_NewOverflow NewOverflow
@@ -94,7 +95,6 @@
 // Test array[TooBig][X] and array[X][TooBig] allocations for int overflows.
 // IOS doesn't honor nothrow, so disable the test there.
 // Crashes on Windows Dbg builds, disable there as well.
-// Fails on Mac 10.8 http://crbug.com/227092
 // Disabled on Linux because failing Linux Valgrind bot, and Valgrind exclusions
 // are not currently read. See http://crbug.com/582398
 TEST(SecurityTest, MAYBE_NewOverflow) {
@@ -109,8 +109,8 @@
   const size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
   const size_t kDynamicArraySize2 = HideValueFromCompiler(kArraySize2);
   {
-    scoped_ptr<char[][kArraySize]> array_pointer(new (nothrow)
-        char[kDynamicArraySize2][kArraySize]);
+    std::unique_ptr<char[][kArraySize]> array_pointer(
+        new (nothrow) char[kDynamicArraySize2][kArraySize]);
     OverflowTestsSoftExpectTrue(!array_pointer);
   }
   // On windows, the compiler prevents static array sizes of more than
@@ -119,8 +119,8 @@
   ALLOW_UNUSED_LOCAL(kDynamicArraySize);
 #else
   {
-    scoped_ptr<char[][kArraySize2]> array_pointer(new (nothrow)
-        char[kDynamicArraySize][kArraySize2]);
+    std::unique_ptr<char[][kArraySize2]> array_pointer(
+        new (nothrow) char[kDynamicArraySize][kArraySize2]);
     OverflowTestsSoftExpectTrue(!array_pointer);
   }
 #endif  // !defined(OS_WIN) || !defined(ARCH_CPU_64_BITS)
@@ -156,7 +156,7 @@
   // 1 MB should get us past what TCMalloc pre-allocated before initializing
   // the sophisticated allocators.
   size_t kAllocSize = 1<<20;
-  scoped_ptr<char, base::FreeDeleter> ptr(
+  std::unique_ptr<char, base::FreeDeleter> ptr(
       static_cast<char*>(malloc(kAllocSize)));
   ASSERT_TRUE(ptr != NULL);
   // If two pointers are separated by less than 512MB, they are considered
diff --git a/base/sequence_checker_unittest.cc b/base/sequence_checker_unittest.cc
index e261b04..1e89a5f 100644
--- a/base/sequence_checker_unittest.cc
+++ b/base/sequence_checker_unittest.cc
@@ -6,6 +6,7 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <utility>
 
 #include "base/bind.h"
@@ -14,7 +15,6 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/single_thread_task_runner.h"
 #include "base/test/sequenced_worker_pool_owner.h"
 #include "base/threading/thread.h"
@@ -94,7 +94,7 @@
   }
 
   void PostDeleteToOtherThread(
-      scoped_ptr<SequenceCheckedObject> sequence_checked_object) {
+      std::unique_ptr<SequenceCheckedObject> sequence_checked_object) {
     other_thread()->message_loop()->DeleteSoon(
         FROM_HERE,
         sequence_checked_object.release());
@@ -115,11 +115,11 @@
  private:
   MessageLoop message_loop_;  // Needed by SequencedWorkerPool to function.
   base::Thread other_thread_;
-  scoped_ptr<SequencedWorkerPoolOwner> pool_owner_;
+  std::unique_ptr<SequencedWorkerPoolOwner> pool_owner_;
 };
 
 TEST_F(SequenceCheckerTest, CallsAllowedOnSameThread) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // Verify that DoStuff doesn't assert.
@@ -130,7 +130,7 @@
 }
 
 TEST_F(SequenceCheckerTest, DestructorAllowedOnDifferentThread) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // Verify the destructor doesn't assert when called on a different thread.
@@ -139,7 +139,7 @@
 }
 
 TEST_F(SequenceCheckerTest, DetachFromSequence) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // Verify that DoStuff doesn't assert when called on a different thread after
@@ -151,7 +151,7 @@
 }
 
 TEST_F(SequenceCheckerTest, SameSequenceTokenValid) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
@@ -166,7 +166,7 @@
 }
 
 TEST_F(SequenceCheckerTest, DetachSequenceTokenValid) {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
@@ -186,7 +186,7 @@
 #if GTEST_HAS_DEATH_TEST || !ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::MethodOnDifferentThreadDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // DoStuff should assert in debug builds only when called on a
@@ -210,7 +210,7 @@
 #endif  // ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::DetachThenCallFromDifferentThreadDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   // DoStuff doesn't assert when called on a different thread
@@ -239,7 +239,7 @@
 #endif  // ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::DifferentSequenceTokensDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
@@ -268,7 +268,7 @@
 #endif  // ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::WorkerPoolAndSimpleThreadDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
@@ -295,7 +295,7 @@
 #endif  // ENABLE_SEQUENCE_CHECKER
 
 void SequenceCheckerTest::TwoDifferentWorkerPoolsDeathTest() {
-  scoped_ptr<SequenceCheckedObject> sequence_checked_object(
+  std::unique_ptr<SequenceCheckedObject> sequence_checked_object(
       new SequenceCheckedObject);
 
   sequence_checked_object->DetachFromSequence();
diff --git a/base/strings/safe_sprintf_unittest.cc b/base/strings/safe_sprintf_unittest.cc
index 931ace8..1a21728 100644
--- a/base/strings/safe_sprintf_unittest.cc
+++ b/base/strings/safe_sprintf_unittest.cc
@@ -10,10 +10,10 @@
 #include <string.h>
 
 #include <limits>
+#include <memory>
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -205,7 +205,7 @@
   // There is a more complicated test in PrintLongString() that covers a lot
   // more edge case, but it is also harder to debug in case of a failure.
   const char kTestString[] = "This is a test";
-  scoped_ptr<char[]> buf(new char[sizeof(kTestString)]);
+  std::unique_ptr<char[]> buf(new char[sizeof(kTestString)]);
   EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
             SafeSNPrintf(buf.get(), sizeof(kTestString), kTestString));
   EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
@@ -369,7 +369,7 @@
 
   // Allocate slightly more space, so that we can verify that SafeSPrintf()
   // never writes past the end of the buffer.
-  scoped_ptr<char[]> tmp(new char[sz+2]);
+  std::unique_ptr<char[]> tmp(new char[sz + 2]);
   memset(tmp.get(), 'X', sz+2);
 
   // Use SafeSPrintf() to output a complex list of arguments:
@@ -383,7 +383,7 @@
   char* out = tmp.get();
   size_t out_sz = sz;
   size_t len;
-  for (scoped_ptr<char[]> perfect_buf;;) {
+  for (std::unique_ptr<char[]> perfect_buf;;) {
     size_t needed = SafeSNPrintf(out, out_sz,
 #if defined(NDEBUG)
                             "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
diff --git a/base/strings/string_util.cc b/base/strings/string_util.cc
index e8000ab..6bbc215 100644
--- a/base/strings/string_util.cc
+++ b/base/strings/string_util.cc
@@ -888,6 +888,7 @@
     const std::vector<OutStringType>& subst,
     std::vector<size_t>* offsets) {
   size_t substitutions = subst.size();
+  DCHECK_LT(substitutions, 10U);
 
   size_t sub_length = 0;
   for (const auto& cur : subst)
@@ -901,7 +902,6 @@
     if ('$' == *i) {
       if (i + 1 != format_string.end()) {
         ++i;
-        DCHECK('$' == *i || '1' <= *i) << "Invalid placeholder: " << *i;
         if ('$' == *i) {
           while (i != format_string.end() && '$' == *i) {
             formatted.push_back('$');
@@ -909,14 +909,11 @@
           }
           --i;
         } else {
-          uintptr_t index = 0;
-          while (i != format_string.end() && '0' <= *i && *i <= '9') {
-            index *= 10;
-            index += *i - '0';
-            ++i;
+          if (*i < '1' || *i > '9') {
+            DLOG(ERROR) << "Invalid placeholder: $" << *i;
+            continue;
           }
-          --i;
-          index -= 1;
+          uintptr_t index = *i - '1';
           if (offsets) {
             ReplacementOffset r_offset(index,
                 static_cast<int>(formatted.size()));
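ReplaceStringPlaceholders now consumes exactly one digit after '$': only $1 through $9 are substituted, an out-of-range digit or non-digit is dropped with a DLOG, and something like $16 becomes the first substitution followed by a literal '6' (see the updated tests in string_util_unittest.cc below). A minimal sketch (illustrative only):

    std::vector<std::string> subst;
    subst.push_back("1a");
    // "$16" is parsed as "$1" plus a literal '6'.
    std::string out = base::ReplaceStringPlaceholders(" $16 ", subst, nullptr);
    // out == " 1a6 "

The new DCHECK_LT(substitutions, 10U) makes the nine-replacement limit explicit at the call site.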
diff --git a/base/strings/string_util.h b/base/strings/string_util.h
index f1d708a..0ee077c 100644
--- a/base/strings/string_util.h
+++ b/base/strings/string_util.h
@@ -21,9 +21,11 @@
 #include "base/strings/string_piece.h"  // For implicit conversions.
 #include "build/build_config.h"
 
+#if defined(ANDROID)
 // On Android, bionic's stdio.h defines an snprintf macro when being built with
 // clang. Undefine it here so it won't collide with base::snprintf().
 #undef snprintf
+#endif  // defined(ANDROID)
 
 namespace base {
 
@@ -341,7 +343,15 @@
 }
 template <typename Char>
 inline bool IsAsciiAlpha(Char c) {
-  return ((c >= 'A') && (c <= 'Z')) || ((c >= 'a') && (c <= 'z'));
+  return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+}
+template <typename Char>
+inline bool IsAsciiUpper(Char c) {
+  return c >= 'A' && c <= 'Z';
+}
+template <typename Char>
+inline bool IsAsciiLower(Char c) {
+  return c >= 'a' && c <= 'z';
 }
 template <typename Char>
 inline bool IsAsciiDigit(Char c) {
@@ -433,7 +443,7 @@
 BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
                                 StringPiece16 separator);
 
-// Replace $1-$2-$3..$9 in the format string with |a|-|b|-|c|..|i| respectively.
+// Replace $1-$2-$3..$9 in the format string with values from |subst|.
 // Additionally, any number of consecutive '$' characters is replaced by that
 // number less one. Eg $$->$, $$$->$$, etc. The offsets parameter here can be
 // NULL. This only allows you to use up to nine replacements.
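IsAsciiUpper() and IsAsciiLower() join the existing locale-independent character helpers. A minimal sketch (illustrative only):

    DCHECK(base::IsAsciiUpper('G'));
    DCHECK(!base::IsAsciiUpper('g'));
    DCHECK(base::IsAsciiLower('g'));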
diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc
index 79eed61..df2226e 100644
--- a/base/strings/string_util_unittest.cc
+++ b/base/strings/string_util_unittest.cc
@@ -820,9 +820,9 @@
 
   string16 formatted =
       ReplaceStringPlaceholders(
-          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$1g,$2h,$3i"), subst, NULL);
+          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$1g,$2h,$3i"), subst, nullptr);
 
-  EXPECT_EQ(formatted, ASCIIToUTF16("9aa,8bb,7cc,d,e,f,9ag,8bh,7ci"));
+  EXPECT_EQ(ASCIIToUTF16("9aa,8bb,7cc,d,e,f,9ag,8bh,7ci"), formatted);
 }
 
 TEST(StringUtilTest, ReplaceStringPlaceholders) {
@@ -839,35 +839,25 @@
 
   string16 formatted =
       ReplaceStringPlaceholders(
-          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i"), subst, NULL);
+          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i"), subst, nullptr);
 
-  EXPECT_EQ(formatted, ASCIIToUTF16("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii"));
+  EXPECT_EQ(ASCIIToUTF16("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii"), formatted);
 }
 
-TEST(StringUtilTest, ReplaceStringPlaceholdersMoreThan9Replacements) {
+TEST(StringUtilTest, ReplaceStringPlaceholdersOneDigit) {
   std::vector<string16> subst;
-  subst.push_back(ASCIIToUTF16("9a"));
-  subst.push_back(ASCIIToUTF16("8b"));
-  subst.push_back(ASCIIToUTF16("7c"));
-  subst.push_back(ASCIIToUTF16("6d"));
-  subst.push_back(ASCIIToUTF16("5e"));
-  subst.push_back(ASCIIToUTF16("4f"));
-  subst.push_back(ASCIIToUTF16("3g"));
-  subst.push_back(ASCIIToUTF16("2h"));
-  subst.push_back(ASCIIToUTF16("1i"));
-  subst.push_back(ASCIIToUTF16("0j"));
-  subst.push_back(ASCIIToUTF16("-1k"));
-  subst.push_back(ASCIIToUTF16("-2l"));
-  subst.push_back(ASCIIToUTF16("-3m"));
-  subst.push_back(ASCIIToUTF16("-4n"));
-
+  subst.push_back(ASCIIToUTF16("1a"));
   string16 formatted =
-      ReplaceStringPlaceholders(
-          ASCIIToUTF16("$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i,"
-                       "$10j,$11k,$12l,$13m,$14n,$1"), subst, NULL);
+      ReplaceStringPlaceholders(ASCIIToUTF16(" $16 "), subst, nullptr);
+  EXPECT_EQ(ASCIIToUTF16(" 1a6 "), formatted);
+}
 
-  EXPECT_EQ(formatted, ASCIIToUTF16("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,"
-                                    "1ii,0jj,-1kk,-2ll,-3mm,-4nn,9a"));
+TEST(StringUtilTest, ReplaceStringPlaceholdersInvalidPlaceholder) {
+  std::vector<string16> subst;
+  subst.push_back(ASCIIToUTF16("1a"));
+  string16 formatted =
+      ReplaceStringPlaceholders(ASCIIToUTF16("+$-+$A+$1+"), subst, nullptr);
+  EXPECT_EQ(ASCIIToUTF16("+++1a+"), formatted);
 }
 
 TEST(StringUtilTest, StdStringReplaceStringPlaceholders) {
@@ -884,9 +874,9 @@
 
   std::string formatted =
       ReplaceStringPlaceholders(
-          "$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i", subst, NULL);
+          "$1a,$2b,$3c,$4d,$5e,$6f,$7g,$8h,$9i", subst, nullptr);
 
-  EXPECT_EQ(formatted, "9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii");
+  EXPECT_EQ("9aa,8bb,7cc,6dd,5ee,4ff,3gg,2hh,1ii", formatted);
 }
 
 TEST(StringUtilTest, ReplaceStringPlaceholdersConsecutiveDollarSigns) {
@@ -894,7 +884,7 @@
   subst.push_back("a");
   subst.push_back("b");
   subst.push_back("c");
-  EXPECT_EQ(ReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst, NULL),
+  EXPECT_EQ(ReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst, nullptr),
             "$1 $$2 $$$3");
 }
 
diff --git a/base/strings/utf_string_conversion_utils.cc b/base/strings/utf_string_conversion_utils.cc
index 807e22d..22058a5 100644
--- a/base/strings/utf_string_conversion_utils.cc
+++ b/base/strings/utf_string_conversion_utils.cc
@@ -55,7 +55,7 @@
 
 #if defined(WCHAR_T_IS_UTF32)
 bool ReadUnicodeCharacter(const wchar_t* src,
-                          int32_t /* src_len */,
+                          int32_t /*src_len*/,
                           int32_t* char_index,
                           uint32_t* code_point) {
   // Conversion is easy since the source is 32-bit.
diff --git a/base/sync_socket_posix.cc b/base/sync_socket_posix.cc
index 923509c..995c8e9 100644
--- a/base/sync_socket_posix.cc
+++ b/base/sync_socket_posix.cc
@@ -105,9 +105,8 @@
   return descriptor.fd;
 }
 
-bool SyncSocket::PrepareTransitDescriptor(
-    ProcessHandle /* peer_process_handle */,
-    TransitDescriptor* descriptor) {
+bool SyncSocket::PrepareTransitDescriptor(ProcessHandle /*peer_process_handle*/,
+                                          TransitDescriptor* descriptor) {
   descriptor->fd = handle();
   descriptor->auto_close = false;
   return descriptor->fd != kInvalidHandle;
diff --git a/base/synchronization/condition_variable.h b/base/synchronization/condition_variable.h
index a41b2ba..ebf90d2 100644
--- a/base/synchronization/condition_variable.h
+++ b/base/synchronization/condition_variable.h
@@ -75,9 +75,12 @@
 #include <pthread.h>
 #endif
 
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
 namespace base {
 
-class ConditionVarImpl;
 class TimeDelta;
 
 class BASE_EXPORT ConditionVariable {
@@ -100,14 +103,15 @@
  private:
 
 #if defined(OS_WIN)
-  ConditionVarImpl* impl_;
+  CONDITION_VARIABLE cv_;
+  SRWLOCK* const srwlock_;
 #elif defined(OS_POSIX)
   pthread_cond_t condition_;
   pthread_mutex_t* user_mutex_;
-#if DCHECK_IS_ON()
-  base::Lock* user_lock_;     // Needed to adjust shadow lock state on wait.
 #endif
 
+#if DCHECK_IS_ON() && (defined(OS_WIN) || defined(OS_POSIX))
+  base::Lock* const user_lock_;  // Needed to adjust shadow lock state on wait.
 #endif
 
   DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
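On Windows the pimpl (ConditionVarImpl) goes away: ConditionVariable now wraps a native CONDITION_VARIABLE and points at the SRWLOCK that LockImpl exposes (see base/synchronization/lock_impl.h below). A sketch of the underlying Win32 pattern, illustrative only and not part of the patch:

    #include <windows.h>

    SRWLOCK lock = SRWLOCK_INIT;
    CONDITION_VARIABLE cv = CONDITION_VARIABLE_INIT;

    // Waiter: must hold the SRW lock; the wait releases it and reacquires it
    // before returning.
    AcquireSRWLockExclusive(&lock);
    SleepConditionVariableSRW(&cv, &lock, INFINITE, 0);
    ReleaseSRWLockExclusive(&lock);

    // Signaller: WakeConditionVariable(&cv) or WakeAllConditionVariable(&cv).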
diff --git a/base/synchronization/condition_variable_unittest.cc b/base/synchronization/condition_variable_unittest.cc
index 4503922..d60b2b8 100644
--- a/base/synchronization/condition_variable_unittest.cc
+++ b/base/synchronization/condition_variable_unittest.cc
@@ -4,16 +4,18 @@
 
 // Multi-threaded tests of ConditionVariable class.
 
+#include "base/synchronization/condition_variable.h"
+
 #include <time.h>
+
 #include <algorithm>
+#include <memory>
 #include <vector>
 
 #include "base/bind.h"
 #include "base/location.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/single_thread_task_runner.h"
-#include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
 #include "base/synchronization/spin_wait.h"
 #include "base/threading/platform_thread.h"
@@ -133,7 +135,7 @@
 
   const int thread_count_;
   int waiting_thread_count_;
-  scoped_ptr<PlatformThreadHandle[]> thread_handles_;
+  std::unique_ptr<PlatformThreadHandle[]> thread_handles_;
   std::vector<int> assignment_history_;  // Number of assignment per worker.
   std::vector<int> completion_history_;  // Number of completions per worker.
   int thread_started_counter_;  // Used to issue unique id to workers.
diff --git a/base/synchronization/lock.h b/base/synchronization/lock.h
index f7dd35d..fbf6cef 100644
--- a/base/synchronization/lock.h
+++ b/base/synchronization/lock.h
@@ -38,9 +38,9 @@
   Lock();
   ~Lock();
 
-  // NOTE: Although windows critical sections support recursive locks, we do not
-  // allow this, and we will commonly fire a DCHECK() if a thread attempts to
-  // acquire the lock a second time (while already holding it).
+  // NOTE: We do not permit recursive locks and will commonly fire a DCHECK() if
+  // a thread attempts to acquire the lock a second time (while already holding
+  // it).
   void Acquire() {
     lock_.Lock();
     CheckUnheldAndMark();
@@ -61,15 +61,11 @@
   void AssertAcquired() const;
 #endif  // DCHECK_IS_ON()
 
-#if defined(OS_POSIX)
-  // The posix implementation of ConditionVariable needs to be able
-  // to see our lock and tweak our debugging counters, as it releases
-  // and acquires locks inside of pthread_cond_{timed,}wait.
+#if defined(OS_POSIX) || defined(OS_WIN)
+  // Both Windows and POSIX implementations of ConditionVariable need to be
+  // able to see our lock and tweak our debugging counters, as they release and
+  // acquire locks inside of their condition variable APIs.
   friend class ConditionVariable;
-#elif defined(OS_WIN)
-  // The Windows Vista implementation of ConditionVariable needs the
-  // native handle of the critical section.
-  friend class WinVistaCondVar;
 #endif
 
  private:

diff --git a/base/synchronization/lock_impl.h b/base/synchronization/lock_impl.h
index ed85987..cbaabc7 100644
--- a/base/synchronization/lock_impl.h
+++ b/base/synchronization/lock_impl.h
@@ -24,9 +24,9 @@
 class BASE_EXPORT LockImpl {
  public:
 #if defined(OS_WIN)
-  typedef CRITICAL_SECTION NativeHandle;
+  using NativeHandle = SRWLOCK;
 #elif defined(OS_POSIX)
-  typedef pthread_mutex_t NativeHandle;
+  using NativeHandle = pthread_mutex_t;
 #endif
 
   LockImpl();
diff --git a/base/synchronization/waitable_event_watcher_posix.cc b/base/synchronization/waitable_event_watcher_posix.cc
index aa425f2..7cf8688 100644
--- a/base/synchronization/waitable_event_watcher_posix.cc
+++ b/base/synchronization/waitable_event_watcher_posix.cc
@@ -145,8 +145,8 @@
 
   cancel_flag_ = new Flag;
   callback_ = callback;
-  internal_callback_ =
-      base::Bind(&AsyncCallbackHelper, cancel_flag_, callback_, event);
+  internal_callback_ = base::Bind(
+      &AsyncCallbackHelper, base::RetainedRef(cancel_flag_), callback_, event);
   WaitableEvent::WaitableEventKernel* kernel = event->kernel_.get();
 
   AutoLock locked(kernel->lock_);
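The added base::RetainedRef reflects a base::Bind tightening in this libchrome version: binding a scoped_refptr where the target function takes a raw pointer must now spell out that the reference is retained for the callback's lifetime. A minimal sketch (illustrative only; Data and Consume() are hypothetical):

    class Data : public base::RefCountedThreadSafe<Data> { /* ... */ };
    void Consume(Data* data);  // Hypothetical; takes a raw pointer.

    scoped_refptr<Data> data(new Data);
    base::Closure cb = base::Bind(&Consume, base::RetainedRef(data));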
diff --git a/base/sys_info.cc b/base/sys_info.cc
index cebb363..5aac9b7 100644
--- a/base/sys_info.cc
+++ b/base/sys_info.cc
@@ -28,7 +28,7 @@
     return false;
 
   int ram_size_mb = SysInfo::AmountOfPhysicalMemoryMB();
-  return (ram_size_mb > 0 && ram_size_mb < kLowMemoryDeviceThresholdMB);
+  return (ram_size_mb > 0 && ram_size_mb <= kLowMemoryDeviceThresholdMB);
 }
 
 static LazyInstance<
diff --git a/base/sys_info_chromeos.cc b/base/sys_info_chromeos.cc
index e35bd0a..3794ed9 100644
--- a/base/sys_info_chromeos.cc
+++ b/base/sys_info_chromeos.cc
@@ -60,7 +60,7 @@
     is_running_on_chromeos_ = false;
 
     std::string lsb_release, lsb_release_time_str;
-    scoped_ptr<Environment> env(Environment::Create());
+    std::unique_ptr<Environment> env(Environment::Create());
     bool parsed_from_env =
         env->GetVar(kLsbReleaseKey, &lsb_release) &&
         env->GetVar(kLsbReleaseTimeKey, &lsb_release_time_str);
@@ -212,7 +212,7 @@
 // static
 void SysInfo::SetChromeOSVersionInfoForTest(const std::string& lsb_release,
                                             const Time& lsb_release_time) {
-  scoped_ptr<Environment> env(Environment::Create());
+  std::unique_ptr<Environment> env(Environment::Create());
   env->SetVar(kLsbReleaseKey, lsb_release);
   env->SetVar(kLsbReleaseTimeKey,
               DoubleToString(lsb_release_time.ToDoubleT()));
diff --git a/base/sys_info_mac.cc b/base/sys_info_mac.cc
deleted file mode 100644
index ff1ec5c..0000000
--- a/base/sys_info_mac.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/sys_info.h"
-
-#include <ApplicationServices/ApplicationServices.h>
-#include <CoreServices/CoreServices.h>
-#include <mach/mach_host.h>
-#include <mach/mach_init.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/sysctl.h>
-#include <sys/types.h>
-
-#include "base/logging.h"
-#include "base/mac/scoped_mach_port.h"
-#include "base/macros.h"
-#include "base/strings/stringprintf.h"
-
-namespace base {
-
-// static
-std::string SysInfo::OperatingSystemName() {
-  return "Mac OS X";
-}
-
-// static
-std::string SysInfo::OperatingSystemVersion() {
-  int32_t major, minor, bugfix;
-  OperatingSystemVersionNumbers(&major, &minor, &bugfix);
-  return base::StringPrintf("%d.%d.%d", major, minor, bugfix);
-}
-
-// static
-void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
-                                            int32_t* minor_version,
-                                            int32_t* bugfix_version) {
-  Gestalt(gestaltSystemVersionMajor,
-      reinterpret_cast<SInt32*>(major_version));
-  Gestalt(gestaltSystemVersionMinor,
-      reinterpret_cast<SInt32*>(minor_version));
-  Gestalt(gestaltSystemVersionBugFix,
-      reinterpret_cast<SInt32*>(bugfix_version));
-}
-
-// static
-int64_t SysInfo::AmountOfPhysicalMemory() {
-  struct host_basic_info hostinfo;
-  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
-  base::mac::ScopedMachSendRight host(mach_host_self());
-  int result = host_info(host.get(),
-                         HOST_BASIC_INFO,
-                         reinterpret_cast<host_info_t>(&hostinfo),
-                         &count);
-  if (result != KERN_SUCCESS) {
-    NOTREACHED();
-    return 0;
-  }
-  DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
-  return static_cast<int64_t>(hostinfo.max_mem);
-}
-
-// static
-int64_t SysInfo::AmountOfAvailablePhysicalMemory() {
-  base::mac::ScopedMachSendRight host(mach_host_self());
-  vm_statistics_data_t vm_info;
-  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
-
-  if (host_statistics(host.get(),
-                      HOST_VM_INFO,
-                      reinterpret_cast<host_info_t>(&vm_info),
-                      &count) != KERN_SUCCESS) {
-    NOTREACHED();
-    return 0;
-  }
-
-  return static_cast<int64_t>(vm_info.free_count - vm_info.speculative_count) *
-         PAGE_SIZE;
-}
-
-// static
-std::string SysInfo::CPUModelName() {
-  char name[256];
-  size_t len = arraysize(name);
-  if (sysctlbyname("machdep.cpu.brand_string", &name, &len, NULL, 0) == 0)
-    return name;
-  return std::string();
-}
-
-std::string SysInfo::HardwareModelName() {
-  char model[256];
-  size_t len = sizeof(model);
-  if (sysctlbyname("hw.model", model, &len, NULL, 0) == 0)
-    return std::string(model, 0, len);
-  return std::string();
-}
-
-}  // namespace base
diff --git a/base/task/cancelable_task_tracker.cc b/base/task/cancelable_task_tracker.cc
index 375ff8b..6f39410 100644
--- a/base/task/cancelable_task_tracker.cc
+++ b/base/task/cancelable_task_tracker.cc
@@ -16,7 +16,7 @@
 #include "base/single_thread_task_runner.h"
 #include "base/synchronization/cancellation_flag.h"
 #include "base/task_runner.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 using base::Bind;
 using base::CancellationFlag;
@@ -39,7 +39,7 @@
 }
 
 bool IsCanceled(const CancellationFlag* flag,
-                base::ScopedClosureRunner* /* cleanup_runner */) {
+                base::ScopedClosureRunner* /*cleanup_runner*/) {
   return flag->IsSet();
 }
 
@@ -131,9 +131,10 @@
 
   // Will always run |untrack_and_delete_flag| on current MessageLoop.
   base::ScopedClosureRunner* untrack_and_delete_flag_runner =
-      new base::ScopedClosureRunner(Bind(&RunOrPostToTaskRunner,
-                                         base::ThreadTaskRunnerHandle::Get(),
-                                         untrack_and_delete_flag));
+      new base::ScopedClosureRunner(
+          Bind(&RunOrPostToTaskRunner,
+               RetainedRef(base::ThreadTaskRunnerHandle::Get()),
+               untrack_and_delete_flag));
 
   *is_canceled_cb =
       Bind(&IsCanceled, flag, base::Owned(untrack_and_delete_flag_runner));
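
Note on the hunk above: with this libchrome revision, a scoped_refptr argument can no
longer be bound directly to a callee that takes a raw pointer; base::RetainedRef() makes
the intent explicit while keeping a reference alive for the lifetime of the callback. A
minimal sketch (UseTaskRunner is a hypothetical callee, not part of the patch):

  void UseTaskRunner(base::TaskRunner* task_runner);  // hypothetical callee

  scoped_refptr<base::SingleThreadTaskRunner> runner =
      base::ThreadTaskRunnerHandle::Get();
  // Old form, no longer accepted: base::Bind(&UseTaskRunner, runner);
  base::Closure cb = base::Bind(&UseTaskRunner, base::RetainedRef(runner));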
diff --git a/base/task_runner_util.h b/base/task_runner_util.h
index da088db..ba8e120 100644
--- a/base/task_runner_util.h
+++ b/base/task_runner_util.h
@@ -7,7 +7,6 @@
 
 #include "base/bind.h"
 #include "base/bind_helpers.h"
-#include "base/callback_internal.h"
 #include "base/logging.h"
 #include "base/task_runner.h"
 
@@ -32,7 +31,7 @@
   // current code that relies on this API softness has been removed.
   // http://crbug.com/162712
   if (!callback.is_null())
-    callback.Run(CallbackForward(*result));
+    callback.Run(std::move(*result));
 }
 
 }  // namespace internal
diff --git a/base/task_runner_util_unittest.cc b/base/task_runner_util_unittest.cc
index 0a4f22e..1df5436 100644
--- a/base/task_runner_util_unittest.cc
+++ b/base/task_runner_util_unittest.cc
@@ -36,13 +36,13 @@
   }
 };
 
-scoped_ptr<Foo> CreateFoo() {
-  return scoped_ptr<Foo>(new Foo);
+std::unique_ptr<Foo> CreateFoo() {
+  return std::unique_ptr<Foo>(new Foo);
 }
 
-void ExpectFoo(scoped_ptr<Foo> foo) {
+void ExpectFoo(std::unique_ptr<Foo> foo) {
   EXPECT_TRUE(foo.get());
-  scoped_ptr<Foo> local_foo(std::move(foo));
+  std::unique_ptr<Foo> local_foo(std::move(foo));
   EXPECT_TRUE(local_foo.get());
   EXPECT_FALSE(foo.get());
 }
@@ -54,13 +54,13 @@
   };
 };
 
-scoped_ptr<Foo, FooDeleter> CreateScopedFoo() {
-  return scoped_ptr<Foo, FooDeleter>(new Foo);
+std::unique_ptr<Foo, FooDeleter> CreateScopedFoo() {
+  return std::unique_ptr<Foo, FooDeleter>(new Foo);
 }
 
-void ExpectScopedFoo(scoped_ptr<Foo, FooDeleter> foo) {
+void ExpectScopedFoo(std::unique_ptr<Foo, FooDeleter> foo) {
   EXPECT_TRUE(foo.get());
-  scoped_ptr<Foo, FooDeleter> local_foo(std::move(foo));
+  std::unique_ptr<Foo, FooDeleter> local_foo(std::move(foo));
   EXPECT_TRUE(local_foo.get());
   EXPECT_FALSE(foo.get());
 }
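
These test hunks are the user-visible side of the std::move() change in task_runner_util.h:
PostTaskAndReplyWithResult() can now forward move-only results such as std::unique_ptr
from the task to the reply. A usage sketch, assuming a task runner named |task_runner|
(this is how the updated CreateFoo/ExpectFoo helpers are typically driven):

  base::PostTaskAndReplyWithResult(task_runner.get(), FROM_HERE,
                                   base::Bind(&CreateFoo),   // returns std::unique_ptr<Foo>
                                   base::Bind(&ExpectFoo));  // consumes std::unique_ptr<Foo>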
diff --git a/base/task_scheduler/scheduler_lock.h b/base/task_scheduler/scheduler_lock.h
index be7c71c..c969eb1 100644
--- a/base/task_scheduler/scheduler_lock.h
+++ b/base/task_scheduler/scheduler_lock.h
@@ -5,9 +5,10 @@
 #ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H
 #define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H
 
+#include <memory>
+
 #include "base/base_export.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
 #include "base/task_scheduler/scheduler_lock_impl.h"
@@ -41,7 +42,7 @@
 // void AssertAcquired().
 //     DCHECKs if the lock is not acquired.
 //
-// scoped_ptr<ConditionVariable> CreateConditionVariable()
+// std::unique_ptr<ConditionVariable> CreateConditionVariable()
 //     Creates a condition variable using this as a lock.
 
 #if DCHECK_IS_ON()
@@ -57,8 +58,8 @@
   SchedulerLock() = default;
   explicit SchedulerLock(const SchedulerLock*) {}
 
-  scoped_ptr<ConditionVariable> CreateConditionVariable() {
-    return scoped_ptr<ConditionVariable>(new ConditionVariable(this));
+  std::unique_ptr<ConditionVariable> CreateConditionVariable() {
+    return std::unique_ptr<ConditionVariable>(new ConditionVariable(this));
   }
 };
 #endif  // DCHECK_IS_ON()
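
CreateConditionVariable() now hands out std::unique_ptr instead of scoped_ptr. A minimal
usage sketch (member names are illustrative; in debug builds Wait() checks that the
creating lock is held):

  SchedulerLock lock_;
  std::unique_ptr<ConditionVariable> cv_ = lock_.CreateConditionVariable();

  void WaitForWork() {
    lock_.Acquire();
    cv_->Wait();  // Checks in debug builds that |lock_| is acquired.
    lock_.Release();
  }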
diff --git a/base/task_scheduler/scheduler_lock_impl.cc b/base/task_scheduler/scheduler_lock_impl.cc
index 609ea22..7480e18 100644
--- a/base/task_scheduler/scheduler_lock_impl.cc
+++ b/base/task_scheduler/scheduler_lock_impl.cc
@@ -136,8 +136,9 @@
   lock_.AssertAcquired();
 }
 
-scoped_ptr<ConditionVariable> SchedulerLockImpl::CreateConditionVariable() {
-  return scoped_ptr<ConditionVariable>(new ConditionVariable(&lock_));
+std::unique_ptr<ConditionVariable>
+SchedulerLockImpl::CreateConditionVariable() {
+  return std::unique_ptr<ConditionVariable>(new ConditionVariable(&lock_));
 }
 
 }  // namespace internal
diff --git a/base/task_scheduler/scheduler_lock_impl.h b/base/task_scheduler/scheduler_lock_impl.h
index 51826fc..65699bb 100644
--- a/base/task_scheduler/scheduler_lock_impl.h
+++ b/base/task_scheduler/scheduler_lock_impl.h
@@ -5,9 +5,10 @@
 #ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H
 #define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H
 
+#include <memory>
+
 #include "base/base_export.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/synchronization/lock.h"
 
 namespace base {
@@ -31,7 +32,7 @@
 
   void AssertAcquired() const;
 
-  scoped_ptr<ConditionVariable> CreateConditionVariable();
+  std::unique_ptr<ConditionVariable> CreateConditionVariable();
 
  private:
   Lock lock_;
diff --git a/base/task_scheduler/scheduler_lock_unittest.cc b/base/task_scheduler/scheduler_lock_unittest.cc
index 48b8b08..6267559 100644
--- a/base/task_scheduler/scheduler_lock_unittest.cc
+++ b/base/task_scheduler/scheduler_lock_unittest.cc
@@ -10,6 +10,7 @@
 #include "base/macros.h"
 #include "base/rand_util.h"
 #include "base/synchronization/waitable_event.h"
+#include "base/task_scheduler/test_utils.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/simple_thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -18,13 +19,6 @@
 namespace internal {
 namespace {
 
-// Death tests misbehave on Android.
-#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-#define EXPECT_DCHECK_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
-#else
-#define EXPECT_DCHECK_DEATH(statement, regex)
-#endif
-
 // Adapted from base::Lock's BasicLockTestThread to make sure
 // Acquire()/Release() don't crash.
 class BasicLockTestThread : public SimpleThread {
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
index a05c802..4ecb605 100644
--- a/base/task_scheduler/sequence.cc
+++ b/base/task_scheduler/sequence.cc
@@ -14,7 +14,7 @@
 
 Sequence::Sequence() = default;
 
-bool Sequence::PushTask(scoped_ptr<Task> task) {
+bool Sequence::PushTask(std::unique_ptr<Task> task) {
   DCHECK(task->sequenced_time.is_null());
   task->sequenced_time = base::TimeTicks::Now();
 
diff --git a/base/task_scheduler/sequence.h b/base/task_scheduler/sequence.h
index e86cf59..37cb8d5 100644
--- a/base/task_scheduler/sequence.h
+++ b/base/task_scheduler/sequence.h
@@ -7,12 +7,12 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <queue>
 
 #include "base/base_export.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/task_scheduler/scheduler_lock.h"
 #include "base/task_scheduler/sequence_sort_key.h"
 #include "base/task_scheduler/task.h"
@@ -22,6 +22,19 @@
 namespace internal {
 
 // A sequence holds tasks that must be executed in posting order.
+//
+// Note: there is a known refcounted-ownership cycle in the Scheduler
+// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
+// This is okay so long as the other owners of Sequence (PriorityQueue and
+// SchedulerWorkerThread in alternation, and
+// SchedulerThreadPoolImpl::SchedulerWorkerThreadDelegateImpl::GetWork()
+// temporarily) keep running it (and taking Tasks from it as a result). A
+// dangling reference cycle would only occur should they release their reference
+// to it while it's not empty. In other words, it is only correct for them to
+// release it after PopTask() returns false to indicate it was made empty by
+// that call (in which case the next PushTask() will return true to indicate to
+// the caller that the Sequence should be re-enqueued for execution).
+//
 // This class is thread-safe.
 class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
  public:
@@ -29,7 +42,7 @@
 
   // Adds |task| at the end of the sequence's queue. Returns true if the
   // sequence was empty before this operation.
-  bool PushTask(scoped_ptr<Task> task);
+  bool PushTask(std::unique_ptr<Task> task);
 
   // Returns the task in front of the sequence's queue, if any.
   const Task* PeekTask() const;
@@ -51,7 +64,7 @@
   mutable SchedulerLock lock_;
 
   // Queue of tasks to execute.
-  std::queue<scoped_ptr<Task>> queue_;
+  std::queue<std::unique_ptr<Task>> queue_;
 
   // Number of tasks contained in the sequence for each priority.
   size_t num_tasks_per_priority_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
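
The comment block added above describes the refcounted ownership cycle and when it is safe
to drop a Sequence reference. A sketch of the posting-side contract it implies
(EnqueueSequence is a stand-in for whatever priority queue the caller uses, not an API in
this patch):

  void PostTaskToSequence(scoped_refptr<Sequence> sequence,
                          std::unique_ptr<Task> task) {
    const bool sequence_was_empty = sequence->PushTask(std::move(task));
    if (sequence_was_empty) {
      // The sequence is not queued anywhere yet; hand it back to the scheduler
      // so a worker eventually pops and runs its tasks.
      EnqueueSequence(std::move(sequence));  // hypothetical helper
    }
  }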
diff --git a/base/task_scheduler/sequence_sort_key.cc b/base/task_scheduler/sequence_sort_key.cc
index 758a411..e356c8b 100644
--- a/base/task_scheduler/sequence_sort_key.cc
+++ b/base/task_scheduler/sequence_sort_key.cc
@@ -9,19 +9,20 @@
 
 SequenceSortKey::SequenceSortKey(TaskPriority priority,
                                  TimeTicks next_task_sequenced_time)
-    : priority(priority), next_task_sequenced_time(next_task_sequenced_time) {}
+    : priority_(priority),
+      next_task_sequenced_time_(next_task_sequenced_time) {}
 
 bool SequenceSortKey::operator<(const SequenceSortKey& other) const {
   // This SequenceSortKey is considered less important than |other| if it has a
   // lower priority or if it has the same priority but its next task was posted
   // later than |other|'s.
   const int priority_diff =
-      static_cast<int>(priority) - static_cast<int>(other.priority);
+      static_cast<int>(priority_) - static_cast<int>(other.priority_);
   if (priority_diff < 0)
     return true;
   if (priority_diff > 0)
     return false;
-  return next_task_sequenced_time > other.next_task_sequenced_time;
+  return next_task_sequenced_time_ > other.next_task_sequenced_time_;
 }
 
 }  // namespace internal
diff --git a/base/task_scheduler/sequence_sort_key.h b/base/task_scheduler/sequence_sort_key.h
index f2dd561..eb81708 100644
--- a/base/task_scheduler/sequence_sort_key.h
+++ b/base/task_scheduler/sequence_sort_key.h
@@ -12,20 +12,35 @@
 namespace base {
 namespace internal {
 
-// An immutable representation of the priority of a Sequence.
-struct BASE_EXPORT SequenceSortKey final {
+// An immutable but assignable representation of the priority of a Sequence.
+class BASE_EXPORT SequenceSortKey final {
+ public:
   SequenceSortKey(TaskPriority priority, TimeTicks next_task_sequenced_time);
 
+  TaskPriority priority() const { return priority_; }
+
   bool operator<(const SequenceSortKey& other) const;
   bool operator>(const SequenceSortKey& other) const { return other < *this; }
 
+  bool operator==(const SequenceSortKey& other) const {
+    return priority_ == other.priority_ &&
+           next_task_sequenced_time_ == other.next_task_sequenced_time_;
+  }
+  bool operator!=(const SequenceSortKey& other) const {
+    return !(other == *this);
+  };
+
+ private:
+  // The private section allows this class to keep its immutable property while
+  // being copy-assignable (i.e. instead of making its members const).
+
   // Highest task priority in the sequence at the time this sort key was
   // created.
-  const TaskPriority priority;
+  TaskPriority priority_;
 
   // Sequenced time of the next task to run in the sequence at the time this
   // sort key was created.
-  const TimeTicks next_task_sequenced_time;
+  TimeTicks next_task_sequenced_time_;
 };
 
 }  // namespace internal
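
With priority() exposed and the comparison operators in place, sort keys order sequences by
highest task priority first and, within a priority, by oldest next task first. An
illustrative check (requires <queue>; the values mirror the unit tests below):

  std::priority_queue<SequenceSortKey> keys;
  keys.push(SequenceSortKey(TaskPriority::BACKGROUND,
                            TimeTicks::FromInternalValue(1000)));
  keys.push(SequenceSortKey(TaskPriority::USER_BLOCKING,
                            TimeTicks::FromInternalValue(2000)));
  keys.push(SequenceSortKey(TaskPriority::USER_BLOCKING,
                            TimeTicks::FromInternalValue(1000)));
  // Pop order: USER_BLOCKING @1000, USER_BLOCKING @2000, BACKGROUND @1000.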
diff --git a/base/task_scheduler/sequence_sort_key_unittest.cc b/base/task_scheduler/sequence_sort_key_unittest.cc
index 5c6c917..2c1d80d 100644
--- a/base/task_scheduler/sequence_sort_key_unittest.cc
+++ b/base/task_scheduler/sequence_sort_key_unittest.cc
@@ -125,5 +125,119 @@
   EXPECT_FALSE(key_f > key_f);
 }
 
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorEqual) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_EQ(key_a, key_a);
+  EXPECT_FALSE(key_b == key_a);
+  EXPECT_FALSE(key_c == key_a);
+  EXPECT_FALSE(key_d == key_a);
+  EXPECT_FALSE(key_e == key_a);
+  EXPECT_FALSE(key_f == key_a);
+
+  EXPECT_FALSE(key_a == key_b);
+  EXPECT_EQ(key_b, key_b);
+  EXPECT_FALSE(key_c == key_b);
+  EXPECT_FALSE(key_d == key_b);
+  EXPECT_FALSE(key_e == key_b);
+  EXPECT_FALSE(key_f == key_b);
+
+  EXPECT_FALSE(key_a == key_c);
+  EXPECT_FALSE(key_b == key_c);
+  EXPECT_EQ(key_c, key_c);
+  EXPECT_FALSE(key_d == key_c);
+  EXPECT_FALSE(key_e == key_c);
+  EXPECT_FALSE(key_f == key_c);
+
+  EXPECT_FALSE(key_a == key_d);
+  EXPECT_FALSE(key_b == key_d);
+  EXPECT_FALSE(key_c == key_d);
+  EXPECT_EQ(key_d, key_d);
+  EXPECT_FALSE(key_e == key_d);
+  EXPECT_FALSE(key_f == key_d);
+
+  EXPECT_FALSE(key_a == key_e);
+  EXPECT_FALSE(key_b == key_e);
+  EXPECT_FALSE(key_c == key_e);
+  EXPECT_FALSE(key_d == key_e);
+  EXPECT_EQ(key_e, key_e);
+  EXPECT_FALSE(key_f == key_e);
+
+  EXPECT_FALSE(key_a == key_f);
+  EXPECT_FALSE(key_b == key_f);
+  EXPECT_FALSE(key_c == key_f);
+  EXPECT_FALSE(key_d == key_f);
+  EXPECT_FALSE(key_e == key_f);
+  EXPECT_EQ(key_f, key_f);
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorNotEqual) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a != key_a);
+  EXPECT_NE(key_b, key_a);
+  EXPECT_NE(key_c, key_a);
+  EXPECT_NE(key_d, key_a);
+  EXPECT_NE(key_e, key_a);
+  EXPECT_NE(key_f, key_a);
+
+  EXPECT_NE(key_a, key_b);
+  EXPECT_FALSE(key_b != key_b);
+  EXPECT_NE(key_c, key_b);
+  EXPECT_NE(key_d, key_b);
+  EXPECT_NE(key_e, key_b);
+  EXPECT_NE(key_f, key_b);
+
+  EXPECT_NE(key_a, key_c);
+  EXPECT_NE(key_b, key_c);
+  EXPECT_FALSE(key_c != key_c);
+  EXPECT_NE(key_d, key_c);
+  EXPECT_NE(key_e, key_c);
+  EXPECT_NE(key_f, key_c);
+
+  EXPECT_NE(key_a, key_d);
+  EXPECT_NE(key_b, key_d);
+  EXPECT_NE(key_c, key_d);
+  EXPECT_FALSE(key_d != key_d);
+  EXPECT_NE(key_e, key_d);
+  EXPECT_NE(key_f, key_d);
+
+  EXPECT_NE(key_a, key_e);
+  EXPECT_NE(key_b, key_e);
+  EXPECT_NE(key_c, key_e);
+  EXPECT_NE(key_d, key_e);
+  EXPECT_FALSE(key_e != key_e);
+  EXPECT_NE(key_f, key_e);
+
+  EXPECT_NE(key_a, key_f);
+  EXPECT_NE(key_b, key_f);
+  EXPECT_NE(key_c, key_f);
+  EXPECT_NE(key_d, key_f);
+  EXPECT_NE(key_e, key_f);
+  EXPECT_FALSE(key_f != key_f);
+}
+
 }  // namespace internal
 }  // namespace base
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
index d81fece..6a15299 100644
--- a/base/task_scheduler/sequence_unittest.cc
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -19,23 +19,28 @@
       : task_a_owned_(
             new Task(FROM_HERE,
                      Closure(),
-                     TaskTraits().WithPriority(TaskPriority::BACKGROUND))),
+                     TaskTraits().WithPriority(TaskPriority::BACKGROUND),
+                     TimeDelta())),
         task_b_owned_(
             new Task(FROM_HERE,
                      Closure(),
-                     TaskTraits().WithPriority(TaskPriority::USER_VISIBLE))),
+                     TaskTraits().WithPriority(TaskPriority::USER_VISIBLE),
+                     TimeDelta())),
         task_c_owned_(
             new Task(FROM_HERE,
                      Closure(),
-                     TaskTraits().WithPriority(TaskPriority::USER_BLOCKING))),
+                     TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
+                     TimeDelta())),
         task_d_owned_(
             new Task(FROM_HERE,
                      Closure(),
-                     TaskTraits().WithPriority(TaskPriority::USER_BLOCKING))),
+                     TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
+                     TimeDelta())),
         task_e_owned_(
             new Task(FROM_HERE,
                      Closure(),
-                     TaskTraits().WithPriority(TaskPriority::BACKGROUND))),
+                     TaskTraits().WithPriority(TaskPriority::BACKGROUND),
+                     TimeDelta())),
         task_a_(task_a_owned_.get()),
         task_b_(task_b_owned_.get()),
         task_c_(task_c_owned_.get()),
@@ -44,11 +49,11 @@
 
  protected:
   // Tasks to be handed off to a Sequence for testing.
-  scoped_ptr<Task> task_a_owned_;
-  scoped_ptr<Task> task_b_owned_;
-  scoped_ptr<Task> task_c_owned_;
-  scoped_ptr<Task> task_d_owned_;
-  scoped_ptr<Task> task_e_owned_;
+  std::unique_ptr<Task> task_a_owned_;
+  std::unique_ptr<Task> task_b_owned_;
+  std::unique_ptr<Task> task_c_owned_;
+  std::unique_ptr<Task> task_d_owned_;
+  std::unique_ptr<Task> task_e_owned_;
 
   // Raw pointers to those same tasks for verification. This is needed because
   // the scoped_ptrs above no longer point to the tasks once they have been
@@ -63,13 +68,6 @@
   DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSequenceTest);
 };
 
-void ExpectSortKey(TaskPriority expected_priority,
-                   TimeTicks expected_sequenced_time,
-                   const SequenceSortKey& actual_sort_key) {
-  EXPECT_EQ(expected_priority, actual_sort_key.priority);
-  EXPECT_EQ(expected_sequenced_time, actual_sort_key.next_task_sequenced_time);
-}
-
 }  // namespace
 
 TEST_F(TaskSchedulerSequenceTest, PushPopPeek) {
@@ -128,56 +126,63 @@
   // Push task A in the sequence. The highest priority is from task A
   // (BACKGROUND). Task A is in front of the sequence.
   sequence->PushTask(std::move(task_a_owned_));
-  ExpectSortKey(TaskPriority::BACKGROUND, task_a_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_a_->sequenced_time),
+            sequence->GetSortKey());
 
   // Push task B in the sequence. The highest priority is from task B
   // (USER_VISIBLE). Task A is still in front of the sequence.
   sequence->PushTask(std::move(task_b_owned_));
-  ExpectSortKey(TaskPriority::USER_VISIBLE, task_a_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_VISIBLE, task_a_->sequenced_time),
+      sequence->GetSortKey());
 
   // Push task C in the sequence. The highest priority is from task C
   // (USER_BLOCKING). Task A is still in front of the sequence.
   sequence->PushTask(std::move(task_c_owned_));
-  ExpectSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time),
+      sequence->GetSortKey());
 
   // Push task D in the sequence. The highest priority is from tasks C/D
   // (USER_BLOCKING). Task A is still in front of the sequence.
   sequence->PushTask(std::move(task_d_owned_));
-  ExpectSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time),
+      sequence->GetSortKey());
 
   // Pop task A. The highest priority is still USER_BLOCKING. The task in front
   // of the sequence is now task B.
   sequence->PopTask();
-  ExpectSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time),
+      sequence->GetSortKey());
 
   // Pop task B. The highest priority is still USER_BLOCKING. The task in front
   // of the sequence is now task C.
   sequence->PopTask();
-  ExpectSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time),
+      sequence->GetSortKey());
 
   // Pop task C. The highest priority is still USER_BLOCKING. The task in front
   // of the sequence is now task D.
   sequence->PopTask();
-  ExpectSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
+      sequence->GetSortKey());
 
   // Push task E in the sequence. The highest priority is still USER_BLOCKING.
   // The task in front of the sequence is still task D.
   sequence->PushTask(std::move(task_e_owned_));
-  ExpectSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(
+      SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
+      sequence->GetSortKey());
 
   // Pop task D. The highest priority is now from task E (BACKGROUND). The
   // task in front of the sequence is now task E.
   sequence->PopTask();
-  ExpectSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time,
-                sequence->GetSortKey());
+  EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time),
+            sequence->GetSortKey());
 }
 
 }  // namespace internal
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
index ae63403..8a589a2 100644
--- a/base/task_scheduler/task.cc
+++ b/base/task_scheduler/task.cc
@@ -9,11 +9,12 @@
 
 Task::Task(const tracked_objects::Location& posted_from,
            const Closure& task,
-           const TaskTraits& traits)
+           const TaskTraits& traits,
+           const TimeDelta& delay)
     : PendingTask(posted_from,
                   task,
-                  TimeTicks(),  // No delayed run time.
-                  false),       // Not nestable.
+                  delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
+                  false),  // Not nestable.
       traits(traits) {}
 
 Task::~Task() = default;
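
The constructor now folds |delay| into PendingTask::delayed_run_time: a zero delay keeps
the null TimeTicks (run as soon as possible), while a non-zero delay becomes an absolute
deadline at construction time. A sketch (DoWork is a hypothetical closure target):

  std::unique_ptr<Task> immediate(new Task(
      FROM_HERE, base::Bind(&DoWork), TaskTraits(), TimeDelta()));
  DCHECK(immediate->delayed_run_time.is_null());

  std::unique_ptr<Task> delayed(new Task(
      FROM_HERE, base::Bind(&DoWork), TaskTraits(),
      TimeDelta::FromSeconds(1)));
  DCHECK(!delayed->delayed_run_time.is_null());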
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
index 6ac483d..2b53c69 100644
--- a/base/task_scheduler/task.h
+++ b/base/task_scheduler/task.h
@@ -8,7 +8,11 @@
 #include "base/base_export.h"
 #include "base/callback_forward.h"
 #include "base/location.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
 #include "base/pending_task.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
 #include "base/task_scheduler/task_traits.h"
 #include "base/time/time.h"
 
@@ -18,9 +22,13 @@
 // A task is a unit of work inside the task scheduler. Support for tracing and
 // profiling inherited from PendingTask.
 struct BASE_EXPORT Task : public PendingTask {
+  // |posted_from| is the site the task was posted from. |task| is the closure
+  // to run. |traits| is metadata about the task. |delay| is a delay that must
+  // expire before the Task runs.
   Task(const tracked_objects::Location& posted_from,
        const Closure& task,
-       const TaskTraits& traits);
+       const TaskTraits& traits,
+       const TimeDelta& delay);
   ~Task();
 
   // The TaskTraits of this task.
@@ -31,6 +39,23 @@
   // time after the task's delay has expired. If the task hasn't been inserted
   // in a sequence yet, this defaults to a null TimeTicks.
   TimeTicks sequenced_time;
+
+  // A reference to the SequencedTaskRunner or SingleThreadTaskRunner that
+  // posted this task, if any. Used to set ThreadTaskRunnerHandle and/or
+  // SequencedTaskRunnerHandle while the task is running.
+  // Note: this creates an ownership cycle
+  //   Sequence -> Task -> TaskRunner -> Sequence -> ...
+  // but that's okay: the cycle is broken when the Task is popped from its
+  // Sequence after being executed. The cycle therefore forces the TaskRunner
+  // to stick around until all of its tasks have been executed, which is a
+  // requirement to support TaskRunnerHandles.
+  scoped_refptr<SequencedTaskRunner> sequenced_task_runner_ref;
+  scoped_refptr<SingleThreadTaskRunner> single_thread_task_runner_ref;
+
+ private:
+  // Disallow copies to make sure no unnecessary ref-bumps are incurred. Making
+  // it move-only would be an option, but isn't necessary for now.
+  DISALLOW_COPY_AND_ASSIGN(Task);
 };
 
 }  // namespace internal
diff --git a/base/task_scheduler/task_traits.cc b/base/task_scheduler/task_traits.cc
index 9e5be32..dd55535 100644
--- a/base/task_scheduler/task_traits.cc
+++ b/base/task_scheduler/task_traits.cc
@@ -4,6 +4,8 @@
 
 #include "base/task_scheduler/task_traits.h"
 
+#include <stddef.h>
+
 #include <ostream>
 
 namespace base {
@@ -14,7 +16,7 @@
 TaskTraits::TaskTraits()
     : with_file_io_(false),
       priority_(TaskPriority::BACKGROUND),
-      shutdown_behavior_(TaskShutdownBehavior::BLOCK_SHUTDOWN) {}
+      shutdown_behavior_(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {}
 
 TaskTraits::~TaskTraits() = default;
 
@@ -34,18 +36,35 @@
   return *this;
 }
 
-void PrintTo(const TaskPriority& task_priority, std::ostream* os) {
+std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
   switch (task_priority) {
     case TaskPriority::BACKGROUND:
-      *os << "BACKGROUND";
+      os << "BACKGROUND";
       break;
     case TaskPriority::USER_VISIBLE:
-      *os << "USER_VISIBLE";
+      os << "USER_VISIBLE";
       break;
     case TaskPriority::USER_BLOCKING:
-      *os << "USER_BLOCKING";
+      os << "USER_BLOCKING";
       break;
   }
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         const TaskShutdownBehavior& shutdown_behavior) {
+  switch (shutdown_behavior) {
+    case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
+      os << "CONTINUE_ON_SHUTDOWN";
+      break;
+    case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
+      os << "SKIP_ON_SHUTDOWN";
+      break;
+    case TaskShutdownBehavior::BLOCK_SHUTDOWN:
+      os << "BLOCK_SHUTDOWN";
+      break;
+  }
+  return os;
 }
 
 }  // namespace base
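
Replacing PrintTo() with operator<< means the enums print both in gtest failure output and
in (D)CHECK/LOG messages. Illustrative use, assuming the usual TaskTraits accessors:

  DCHECK_EQ(TaskPriority::USER_BLOCKING, traits.priority());
  LOG(INFO) << "shutdown behavior: " << traits.shutdown_behavior();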
diff --git a/base/task_scheduler/task_traits.h b/base/task_scheduler/task_traits.h
index fbd63c5..523fd13 100644
--- a/base/task_scheduler/task_traits.h
+++ b/base/task_scheduler/task_traits.h
@@ -126,8 +126,14 @@
   SINGLE_THREADED,
 };
 
-// Pretty Printer for Google Test.
-void BASE_EXPORT PrintTo(const TaskPriority& task_priority, std::ostream* os);
+// Stream operators so TaskPriority and TaskShutdownBehavior can be used in
+// DCHECK statements.
+BASE_EXPORT std::ostream& operator<<(std::ostream& os,
+                                     const TaskPriority& task_priority);
+
+BASE_EXPORT std::ostream& operator<<(
+    std::ostream& os,
+    const TaskShutdownBehavior& shutdown_behavior);
 
 }  // namespace base
 
diff --git a/base/task_scheduler/test_utils.h b/base/task_scheduler/test_utils.h
new file mode 100644
index 0000000..bafd09a
--- /dev/null
+++ b/base/task_scheduler/test_utils.h
@@ -0,0 +1,19 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TEST_UTILS_H_
+#define BASE_TASK_SCHEDULER_TEST_UTILS_H_
+
+#include "base/logging.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests misbehave on Android.
+#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define EXPECT_DCHECK_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
+#else
+#define EXPECT_DCHECK_DEATH(statement, regex)
+#endif
+
+#endif  // BASE_TASK_SCHEDULER_TEST_UTILS_H_
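
The macro lives here so other task_scheduler tests can reuse it. A usage sketch (lock and
regex are illustrative): the statement must die under a DCHECK build on non-Android
platforms, and the whole check compiles away otherwise.

  SchedulerLock lock;
  EXPECT_DCHECK_DEATH({ lock.AssertAcquired(); }, "");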
diff --git a/base/template_util.h b/base/template_util.h
index 0c3cac2..74c8e5a 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -5,7 +5,9 @@
 #ifndef BASE_TEMPLATE_UTIL_H_
 #define BASE_TEMPLATE_UTIL_H_
 
+#include <stddef.h>
 #include <type_traits>
+#include <utility>
 
 #include "build/build_config.h"
 
@@ -15,6 +17,71 @@
 template <class T> struct is_non_const_reference<T&> : std::true_type {};
 template <class T> struct is_non_const_reference<const T&> : std::false_type {};
 
+// is_assignable
+
+namespace internal {
+
+template <typename First, typename Second>
+struct SelectSecond {
+  using type = Second;
+};
+
+struct Any {
+  Any(...);
+};
+
+// True case: If |Lvalue| can be assigned to from |Rvalue|, then the return
+// value is a true_type.
+template <class Lvalue, class Rvalue>
+typename internal::SelectSecond<
+    decltype((std::declval<Lvalue>() = std::declval<Rvalue>())),
+    std::true_type>::type
+IsAssignableTest(Lvalue&&, Rvalue&&);
+
+// False case: Otherwise the return value is a false_type.
+template <class Rvalue>
+std::false_type IsAssignableTest(internal::Any, Rvalue&&);
+
+// Default case: Neither Lvalue nor Rvalue is void. Uses IsAssignableTest to
+// determine the type of IsAssignableImpl.
+template <class Lvalue,
+          class Rvalue,
+          bool = std::is_void<Lvalue>::value || std::is_void<Rvalue>::value>
+struct IsAssignableImpl
+    : public std::common_type<decltype(
+          internal::IsAssignableTest(std::declval<Lvalue>(),
+                                     std::declval<Rvalue>()))>::type {};
+
+// Void case: Either Lvalue or Rvalue is void. Then the type of IsAssignableTest
+// is false_type.
+template <class Lvalue, class Rvalue>
+struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
+
+}  // namespace internal
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class Lvalue, class Rvalue>
+struct is_assignable : public internal::IsAssignableImpl<Lvalue, Rvalue> {};
+
+// is_copy_assignable is true if a T const& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_copy_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           typename std::add_lvalue_reference<
+                               typename std::add_const<T>::type>::type> {};
+
+// is_move_assignable is true if a T&& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_move_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           const typename std::add_rvalue_reference<T>::type> {
+};
+
 }  // namespace base
 
 #endif  // BASE_TEMPLATE_UTIL_H_
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
index 25441cd..5686d7c 100644
--- a/base/template_util_unittest.cc
+++ b/base/template_util_unittest.cc
@@ -15,5 +15,38 @@
               "IsNonConstReference");
 static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
 
+class AssignParent {};
+class AssignChild : AssignParent {};
+
+// is_assignable<Type1, Type2>
+static_assert(!is_assignable<int, int>::value, "IsAssignable");  // 1 = 1;
+static_assert(!is_assignable<int, double>::value, "IsAssignable");
+static_assert(is_assignable<int&, int>::value, "IsAssignable");
+static_assert(is_assignable<int&, double>::value, "IsAssignable");
+static_assert(is_assignable<int&, int&>::value, "IsAssignable");
+static_assert(is_assignable<int&, int const&>::value, "IsAssignable");
+static_assert(!is_assignable<int const&, int>::value, "IsAssignable");
+static_assert(!is_assignable<AssignParent&, AssignChild>::value,
+              "IsAssignable");
+static_assert(!is_assignable<AssignChild&, AssignParent>::value,
+              "IsAssignable");
+
+struct AssignCopy {};
+struct AssignNoCopy {
+  AssignNoCopy& operator=(AssignNoCopy&&) { return *this; }
+  AssignNoCopy& operator=(const AssignNoCopy&) = delete;
+};
+struct AssignNoMove {
+  AssignNoMove& operator=(AssignNoMove&&) = delete;
+  AssignNoMove& operator=(const AssignNoMove&) = delete;
+};
+
+static_assert(is_copy_assignable<AssignCopy>::value, "IsCopyAssignable");
+static_assert(!is_copy_assignable<AssignNoCopy>::value, "IsCopyAssignable");
+
+static_assert(is_move_assignable<AssignCopy>::value, "IsMoveAssignable");
+static_assert(is_move_assignable<AssignNoCopy>::value, "IsMoveAssignable");
+static_assert(!is_move_assignable<AssignNoMove>::value, "IsMoveAssignable");
+
 }  // namespace
 }  // namespace base
diff --git a/base/test/BUILD.gn b/base/test/BUILD.gn
index 463f343..a080141 100644
--- a/base/test/BUILD.gn
+++ b/base/test/BUILD.gn
@@ -63,6 +63,8 @@
     "perf_time_logger.h",
     "power_monitor_test_base.cc",
     "power_monitor_test_base.h",
+    "scoped_command_line.cc",
+    "scoped_command_line.h",
     "scoped_locale.cc",
     "scoped_locale.h",
     "scoped_path_override.cc",
@@ -90,6 +92,8 @@
     "test_io_thread.h",
     "test_listener_ios.h",
     "test_listener_ios.mm",
+    "test_message_loop.cc",
+    "test_message_loop.h",
     "test_mock_time_task_runner.cc",
     "test_mock_time_task_runner.h",
     "test_pending_task.cc",
@@ -168,6 +172,10 @@
     set_sources_assignment_filter(sources_assignment_filter)
   }
 
+  if (is_mac) {
+    libs = [ "AppKit.framework" ]
+  }
+
   if (is_android) {
     deps += [ ":base_unittests_jni_headers" ]
   }
diff --git a/base/test/multiprocess_test.cc b/base/test/multiprocess_test.cc
index 6a1b7b4..de56e7f 100644
--- a/base/test/multiprocess_test.cc
+++ b/base/test/multiprocess_test.cc
@@ -26,7 +26,7 @@
 
   return LaunchProcess(command_line, options);
 }
-#endif  // !defined(OS_ANDROID)
+#endif  // !OS_ANDROID && !__ANDROID__ && !__ANDROID_HOST__
 
 CommandLine GetMultiProcessTestChildBaseCommandLine() {
   CommandLine cmd_line = *CommandLine::ForCurrentProcess();
diff --git a/base/test/multiprocess_test.h b/base/test/multiprocess_test.h
index ab1d0ca..ae4c3eb 100644
--- a/base/test/multiprocess_test.h
+++ b/base/test/multiprocess_test.h
@@ -66,6 +66,25 @@
 // may add any flags needed for your child process.
 CommandLine GetMultiProcessTestChildBaseCommandLine();
 
+#if defined(OS_ANDROID)
+
+// Enable the alternate test child implementation, which supports spawning a child
+// after threads have been created. If used, this MUST be the first line of
+// main(). The main function is passed in to avoid a link-time dependency in
+// component builds.
+void InitAndroidMultiProcessTestHelper(int (*main)(int, char**));
+
+// Returns true if the current process is a test child.
+bool AndroidIsChildProcess();
+
+// Wait for a test child to exit if the alternate test child implementation is
+// being used.
+bool AndroidWaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code)
+    WARN_UNUSED_RESULT;
+
+#endif  // defined(OS_ANDROID)
+
 // MultiProcessTest ------------------------------------------------------------
 
 // A MultiProcessTest is a test class which makes it easier to
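
The new Android-only entry points are meant to be wired into the test binary's main(). A
sketch, assuming the suite runs through base::TestSuite (any other runner works the same
way); the helper must be initialised before any thread is created so the early fork()
stays safe:

  // Assumed includes: base/test/multiprocess_test.h, base/test/test_suite.h.
  int main(int argc, char** argv) {
  #if defined(OS_ANDROID)
    // Must be the first statement of main().
    base::InitAndroidMultiProcessTestHelper(main);
  #endif
    base::TestSuite test_suite(argc, argv);
    return test_suite.Run();
  }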
diff --git a/base/test/multiprocess_test_android.cc b/base/test/multiprocess_test_android.cc
index dc489d1..f58b452 100644
--- a/base/test/multiprocess_test_android.cc
+++ b/base/test/multiprocess_test_android.cc
@@ -4,17 +4,391 @@
 
 #include "base/test/multiprocess_test.h"
 
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/socket.h>
 #include <unistd.h>
 
+#include <memory>
+#include <utility>
+#include <vector>
+
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/containers/hash_tables.h"
+#include "base/lazy_instance.h"
 #include "base/logging.h"
+#include "base/macros.h"
+#include "base/pickle.h"
 #include "base/posix/global_descriptors.h"
+#include "base/posix/unix_domain_socket_linux.h"
 #include "testing/multiprocess_func_list.h"
 
 namespace base {
 
+namespace {
+
+const int kMaxMessageSize = 1024 * 1024;
+const int kFragmentSize = 4096;
+
+// Message sent between parent process and helper child process.
+enum class MessageType : uint32_t {
+  START_REQUEST,
+  START_RESPONSE,
+  WAIT_REQUEST,
+  WAIT_RESPONSE,
+};
+
+struct MessageHeader {
+  uint32_t size;
+  MessageType type;
+};
+
+struct StartProcessRequest {
+  MessageHeader header =
+      {sizeof(StartProcessRequest), MessageType::START_REQUEST};
+
+  uint32_t num_args = 0;
+  uint32_t num_fds = 0;
+};
+
+struct StartProcessResponse {
+  MessageHeader header =
+      {sizeof(StartProcessResponse), MessageType::START_RESPONSE};
+
+  pid_t child_pid;
+};
+
+struct WaitProcessRequest {
+  MessageHeader header =
+      {sizeof(WaitProcessRequest), MessageType::WAIT_REQUEST};
+
+  pid_t pid;
+  uint64_t timeout_ms;
+};
+
+struct WaitProcessResponse {
+  MessageHeader header =
+      {sizeof(WaitProcessResponse), MessageType::WAIT_RESPONSE};
+
+  bool success = false;
+  int32_t exit_code = 0;
+};
+
+// Helper class that implements an alternate test child launcher for
+// multi-process tests. The default implementation doesn't work if the child is
+// launched after starting threads. However, for some tests (e.g. Mojo), this
+// is necessary. This implementation works around that issue by forking a helper
+// process very early in main(), before any real work is done. Then, when a
+// child needs to be spawned, a message is sent to that helper process, which
+// then forks and returns the result to the parent. The forked child then calls
+// main() and things look as though a brand new process has been fork/exec'd.
+class LaunchHelper {
+ public:
+  using MainFunction = int (*)(int, char**);
+
+  LaunchHelper() {}
+
+  // Initializes the alternate test child implementation.
+  void Init(MainFunction main);
+
+  // Starts a child test helper process.
+  Process StartChildTestHelper(const std::string& procname,
+                               const CommandLine& base_command_line,
+                               const LaunchOptions& options);
+
+  // Waits for a child test helper process.
+  bool WaitForChildExitWithTimeout(const Process& process, TimeDelta timeout,
+                                   int* exit_code);
+
+  bool IsReady() const { return child_fd_ != -1; }
+  bool IsChild() const { return is_child_; }
+
+ private:
+  // Wrappers around sendmsg/recvmsg that support message fragmentation.
+  void Send(int fd, const MessageHeader* msg, const std::vector<int>& fds);
+  ssize_t Recv(int fd, void* buf, std::vector<ScopedFD>* fds);
+
+  // Parent process implementation.
+  void DoParent(int fd);
+  // Helper process implementation.
+  void DoHelper(int fd);
+
+  void StartProcessInHelper(const StartProcessRequest* request,
+                           std::vector<ScopedFD> fds);
+  void WaitForChildInHelper(const WaitProcessRequest* request);
+
+  bool is_child_ = false;
+
+  // Parent vars.
+  int child_fd_ = -1;
+
+  // Helper vars.
+  int parent_fd_ = -1;
+  MainFunction main_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(LaunchHelper);
+};
+
+void LaunchHelper::Init(MainFunction main) {
+  main_ = main;
+
+  // Create a communication channel between the parent and child launch helper.
+  // fd[0] belongs to the parent, fd[1] belongs to the child.
+  int fds[2] = {-1, -1};
+  int rv = socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds);
+  PCHECK(rv == 0);
+  CHECK_NE(-1, fds[0]);
+  CHECK_NE(-1, fds[1]);
+
+  pid_t pid = fork();
+  PCHECK(pid >= 0) << "Fork failed";
+  if (pid) {
+    // Parent.
+    rv = close(fds[1]);
+    PCHECK(rv == 0);
+    DoParent(fds[0]);
+  } else {
+    // Helper.
+    rv = close(fds[0]);
+    PCHECK(rv == 0);
+    DoHelper(fds[1]);
+    NOTREACHED();
+    _exit(0);
+  }
+}
+
+void LaunchHelper::Send(
+    int fd, const MessageHeader* msg, const std::vector<int>& fds) {
+  uint32_t bytes_remaining = msg->size;
+  const char* buf = reinterpret_cast<const char*>(msg);
+  while (bytes_remaining) {
+    size_t send_size =
+        (bytes_remaining > kFragmentSize) ? kFragmentSize : bytes_remaining;
+    bool success = UnixDomainSocket::SendMsg(
+        fd, buf, send_size,
+        (bytes_remaining == msg->size) ? fds : std::vector<int>());
+    CHECK(success);
+    bytes_remaining -= send_size;
+    buf += send_size;
+  }
+}
+
+ssize_t LaunchHelper::Recv(int fd, void* buf, std::vector<ScopedFD>* fds) {
+  ssize_t size = UnixDomainSocket::RecvMsg(fd, buf, kFragmentSize, fds);
+  if (size <= 0)
+    return size;
+
+  const MessageHeader* header = reinterpret_cast<const MessageHeader*>(buf);
+  CHECK(header->size < kMaxMessageSize);
+  uint32_t bytes_remaining = header->size - size;
+  char* buffer = reinterpret_cast<char*>(buf);
+  buffer += size;
+  while (bytes_remaining) {
+    std::vector<ScopedFD> dummy_fds;
+    size = UnixDomainSocket::RecvMsg(fd, buffer, kFragmentSize, &dummy_fds);
+    if (size <= 0)
+      return size;
+
+    CHECK(dummy_fds.empty());
+    CHECK(size == kFragmentSize ||
+          static_cast<size_t>(size) == bytes_remaining);
+    bytes_remaining -= size;
+    buffer += size;
+  }
+  return header->size;
+}
+
+void LaunchHelper::DoParent(int fd) {
+  child_fd_ = fd;
+}
+
+void LaunchHelper::DoHelper(int fd) {
+  parent_fd_ = fd;
+  is_child_ = true;
+  std::unique_ptr<char[]> buf(new char[kMaxMessageSize]);
+  while (true) {
+    // Wait for a message from the parent.
+    std::vector<ScopedFD> fds;
+    ssize_t size = Recv(parent_fd_, buf.get(), &fds);
+    if (size == 0 || (size < 0 && errno == ECONNRESET)) {
+      _exit(0);
+    }
+    PCHECK(size > 0);
+
+    const MessageHeader* header =
+        reinterpret_cast<const MessageHeader*>(buf.get());
+    CHECK_EQ(static_cast<ssize_t>(header->size), size);
+    switch (header->type) {
+      case MessageType::START_REQUEST:
+        StartProcessInHelper(
+            reinterpret_cast<const StartProcessRequest*>(buf.get()),
+            std::move(fds));
+        break;
+      case MessageType::WAIT_REQUEST:
+        WaitForChildInHelper(
+            reinterpret_cast<const WaitProcessRequest*>(buf.get()));
+        break;
+      default:
+        LOG(FATAL) << "Unsupported message type: "
+                   << static_cast<uint32_t>(header->type);
+    }
+  }
+}
+
+void LaunchHelper::StartProcessInHelper(const StartProcessRequest* request,
+                                        std::vector<ScopedFD> fds) {
+  pid_t pid = fork();
+  PCHECK(pid >= 0) << "Fork failed";
+  if (pid) {
+    // Helper.
+    StartProcessResponse resp;
+    resp.child_pid = pid;
+    Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
+         std::vector<int>());
+  } else {
+    // Child.
+    PCHECK(close(parent_fd_) == 0);
+    parent_fd_ = -1;
+    CommandLine::Reset();
+
+    Pickle serialised_extra(reinterpret_cast<const char*>(request + 1),
+                            request->header.size - sizeof(StartProcessRequest));
+    PickleIterator iter(serialised_extra);
+    std::vector<std::string> args;
+    for (size_t i = 0; i < request->num_args; i++) {
+      std::string arg;
+      CHECK(iter.ReadString(&arg));
+      args.push_back(std::move(arg));
+    }
+
+    CHECK_EQ(request->num_fds, fds.size());
+    for (size_t i = 0; i < request->num_fds; i++) {
+      int new_fd;
+      CHECK(iter.ReadInt(&new_fd));
+      int old_fd = fds[i].release();
+      if (new_fd != old_fd) {
+        if (dup2(old_fd, new_fd) < 0) {
+          PLOG(FATAL) << "dup2";
+        }
+        PCHECK(close(old_fd) == 0);
+      }
+    }
+
+    // argv has argc+1 elements, where the last element is NULL.
+    std::unique_ptr<char*[]> argv(new char*[args.size() + 1]);
+    for (size_t i = 0; i < args.size(); i++) {
+      argv[i] = const_cast<char*>(args[i].c_str());
+    }
+    argv[args.size()] = nullptr;
+    _exit(main_(args.size(), argv.get()));
+    NOTREACHED();
+  }
+}
+
+void LaunchHelper::WaitForChildInHelper(const WaitProcessRequest* request) {
+  Process process(request->pid);
+  TimeDelta timeout = TimeDelta::FromMilliseconds(request->timeout_ms);
+  int exit_code = -1;
+  bool success = process.WaitForExitWithTimeout(timeout, &exit_code);
+
+  WaitProcessResponse resp;
+  resp.exit_code = exit_code;
+  resp.success = success;
+  Send(parent_fd_, reinterpret_cast<const MessageHeader*>(&resp),
+       std::vector<int>());
+}
+
+Process LaunchHelper::StartChildTestHelper(const std::string& procname,
+                                           const CommandLine& base_command_line,
+                                           const LaunchOptions& options) {
+
+  CommandLine command_line(base_command_line);
+  if (!command_line.HasSwitch(switches::kTestChildProcess))
+    command_line.AppendSwitchASCII(switches::kTestChildProcess, procname);
+
+  StartProcessRequest request;
+  Pickle serialised_extra;
+  const CommandLine::StringVector& argv = command_line.argv();
+  for (const auto& arg : argv)
+    CHECK(serialised_extra.WriteString(arg));
+  request.num_args = argv.size();
+
+  std::vector<int> fds_to_send;
+  if (options.fds_to_remap) {
+    for (auto p : *options.fds_to_remap) {
+      CHECK(serialised_extra.WriteInt(p.second));
+      fds_to_send.push_back(p.first);
+    }
+    request.num_fds = options.fds_to_remap->size();
+  }
+
+  size_t buf_size = sizeof(StartProcessRequest) + serialised_extra.size();
+  request.header.size = buf_size;
+  std::unique_ptr<char[]> buffer(new char[buf_size]);
+  memcpy(buffer.get(), &request, sizeof(StartProcessRequest));
+  memcpy(buffer.get() + sizeof(StartProcessRequest), serialised_extra.data(),
+         serialised_extra.size());
+
+  // Send start message.
+  Send(child_fd_, reinterpret_cast<const MessageHeader*>(buffer.get()),
+       fds_to_send);
+
+  // Synchronously get response.
+  StartProcessResponse response;
+  std::vector<ScopedFD> recv_fds;
+  ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
+  PCHECK(resp_size == sizeof(StartProcessResponse));
+
+  return Process(response.child_pid);
+}
+
+bool LaunchHelper::WaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code) {
+
+  WaitProcessRequest request;
+  request.pid = process.Handle();
+  request.timeout_ms = timeout.InMilliseconds();
+
+  Send(child_fd_, reinterpret_cast<const MessageHeader*>(&request),
+       std::vector<int>());
+
+  WaitProcessResponse response;
+  std::vector<ScopedFD> recv_fds;
+  ssize_t resp_size = Recv(child_fd_, &response, &recv_fds);
+  PCHECK(resp_size == sizeof(WaitProcessResponse));
+
+  if (!response.success)
+    return false;
+
+  *exit_code = response.exit_code;
+  return true;
+}
+
+LazyInstance<LaunchHelper>::Leaky g_launch_helper;
+
+}  // namespace
+
+void InitAndroidMultiProcessTestHelper(int (*main)(int, char**)) {
+  DCHECK(main);
+  // Don't allow child processes to themselves create new child processes.
+  if (g_launch_helper.Get().IsChild())
+    return;
+  g_launch_helper.Get().Init(main);
+}
+
+bool AndroidIsChildProcess() {
+  return g_launch_helper.Get().IsChild();
+}
+
+bool AndroidWaitForChildExitWithTimeout(
+    const Process& process, TimeDelta timeout, int* exit_code) {
+  CHECK(g_launch_helper.Get().IsReady());
+  return g_launch_helper.Get().WaitForChildExitWithTimeout(
+      process, timeout, exit_code);
+}
+
 // A very basic implementation for Android. On Android tests can run in an APK
 // and we don't have an executable to exec*. This implementation does the bare
 // minimum to execute the method specified by procname (in the child process).
@@ -22,6 +396,11 @@
 Process SpawnMultiProcessTestChild(const std::string& procname,
                                    const CommandLine& base_command_line,
                                    const LaunchOptions& options) {
+  if (g_launch_helper.Get().IsReady()) {
+    return g_launch_helper.Get().StartChildTestHelper(
+        procname, base_command_line, options);
+  }
+
   // TODO(viettrungluu): The FD-remapping done below is wrong in the presence of
   // cycles (e.g., fd1 -> fd2, fd2 -> fd1). crbug.com/326576
   FileHandleMappingVector empty;
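
Once the helper is initialised, SpawnMultiProcessTestChild() transparently routes through
it and the parent waits via the new API. A hypothetical test body inside namespace base
(NoopChild and the test name are illustrative):

  MULTIPROCESS_TEST_MAIN(NoopChild) {
    return 0;
  }

  TEST_F(MultiProcessTest, SpawnsViaHelper) {
    Process child = SpawnChild("NoopChild");
    int exit_code = -1;
    EXPECT_TRUE(AndroidWaitForChildExitWithTimeout(
        child, TimeDelta::FromSeconds(30), &exit_code));
    EXPECT_EQ(0, exit_code);
  }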
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index d912df4..87b107e 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -52,9 +52,9 @@
   state->SetInteger("delay", delay.ToInternalValue());
 }
 
-scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
 TestPendingTask::AsValue() const {
-  scoped_ptr<base::trace_event::TracedValue> state(
+  std::unique_ptr<base::trace_event::TracedValue> state(
       new base::trace_event::TracedValue());
   AsValueInto(state.get());
   return std::move(state);
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 3b29961..2dbdb7e 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -59,7 +59,7 @@
   // Functions for using test pending task with tracing, useful in unit
   // testing.
   void AsValueInto(base::trace_event::TracedValue* state) const;
-  scoped_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
   std::string ToString() const;
 };
 
diff --git a/base/test/trace_event_analyzer.cc b/base/test/trace_event_analyzer.cc
index 2046355..64436dc 100644
--- a/base/test/trace_event_analyzer.cc
+++ b/base/test/trace_event_analyzer.cc
@@ -7,10 +7,10 @@
 #include <math.h>
 
 #include <algorithm>
+#include <memory>
 #include <set>
 
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/pattern.h"
 #include "base/values.h"
 
@@ -26,9 +26,13 @@
       other_event(NULL) {
 }
 
+TraceEvent::TraceEvent(TraceEvent&& other) = default;
+
 TraceEvent::~TraceEvent() {
 }
 
+TraceEvent& TraceEvent::operator=(TraceEvent&& rhs) = default;
+
 bool TraceEvent::SetFromJSON(const base::Value* event_value) {
   if (event_value->GetType() != base::Value::TYPE_DICTIONARY) {
     LOG(ERROR) << "Value must be TYPE_DICTIONARY";
@@ -52,6 +56,12 @@
   bool require_id = (phase == TRACE_EVENT_PHASE_ASYNC_BEGIN ||
                      phase == TRACE_EVENT_PHASE_ASYNC_STEP_INTO ||
                      phase == TRACE_EVENT_PHASE_ASYNC_STEP_PAST ||
+                     phase == TRACE_EVENT_PHASE_MEMORY_DUMP ||
+                     phase == TRACE_EVENT_PHASE_ENTER_CONTEXT ||
+                     phase == TRACE_EVENT_PHASE_LEAVE_CONTEXT ||
+                     phase == TRACE_EVENT_PHASE_CREATE_OBJECT ||
+                     phase == TRACE_EVENT_PHASE_DELETE_OBJECT ||
+                     phase == TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ||
                      phase == TRACE_EVENT_PHASE_ASYNC_END);
 
   if (require_origin && !dictionary->GetInteger("pid", &thread.process_id)) {
@@ -101,11 +111,9 @@
       arg_numbers[it.key()] = static_cast<double>(boolean ? 1 : 0);
     } else if (it.value().GetAsDouble(&double_num)) {
       arg_numbers[it.key()] = double_num;
-    } else {
-      LOG(WARNING) << "Value type of argument is not supported: " <<
-          static_cast<int>(it.value().GetType());
-      continue;  // Skip non-supported arguments.
     }
+    // Record all arguments as values.
+    arg_values[it.key()] = it.value().CreateDeepCopy();
   }
 
   return true;
@@ -117,9 +125,9 @@
 
 bool TraceEvent::GetArgAsString(const std::string& name,
                                 std::string* arg) const {
-  std::map<std::string, std::string>::const_iterator i = arg_strings.find(name);
-  if (i != arg_strings.end()) {
-    *arg = i->second;
+  const auto it = arg_strings.find(name);
+  if (it != arg_strings.end()) {
+    *arg = it->second;
     return true;
   }
   return false;
@@ -127,9 +135,19 @@
 
 bool TraceEvent::GetArgAsNumber(const std::string& name,
                                 double* arg) const {
-  std::map<std::string, double>::const_iterator i = arg_numbers.find(name);
-  if (i != arg_numbers.end()) {
-    *arg = i->second;
+  const auto it = arg_numbers.find(name);
+  if (it != arg_numbers.end()) {
+    *arg = it->second;
+    return true;
+  }
+  return false;
+}
+
+bool TraceEvent::GetArgAsValue(const std::string& name,
+                               std::unique_ptr<base::Value>* arg) const {
+  const auto it = arg_values.find(name);
+  if (it != arg_values.end()) {
+    *arg = it->second->CreateDeepCopy();
     return true;
   }
   return false;
@@ -143,6 +161,10 @@
   return (arg_numbers.find(name) != arg_numbers.end());
 }
 
+bool TraceEvent::HasArg(const std::string& name) const {
+  return (arg_values.find(name) != arg_values.end());
+}
+
 std::string TraceEvent::GetKnownArgAsString(const std::string& name) const {
   std::string arg_string;
   bool result = GetArgAsString(name, &arg_string);
@@ -171,6 +193,14 @@
   return (arg_double != 0.0);
 }
 
+std::unique_ptr<base::Value> TraceEvent::GetKnownArgAsValue(
+    const std::string& name) const {
+  std::unique_ptr<base::Value> arg_value;
+  bool result = GetArgAsValue(name, &arg_value);
+  DCHECK(result);
+  return arg_value;
+}
+
 // QueryNode
 
 QueryNode::QueryNode(const Query& query) : query_(query) {
@@ -649,7 +679,7 @@
 
 bool ParseEventsFromJson(const std::string& json,
                          std::vector<TraceEvent>* output) {
-  scoped_ptr<base::Value> root = base::JSONReader::Read(json);
+  std::unique_ptr<base::Value> root = base::JSONReader::Read(json);
 
   base::ListValue* root_list = NULL;
   if (!root.get() || !root->GetAsList(&root_list))
@@ -660,7 +690,7 @@
     if (root_list->Get(i, &item)) {
       TraceEvent event;
       if (event.SetFromJSON(item))
-        output->push_back(event);
+        output->push_back(std::move(event));
       else
         return false;
     }
@@ -682,7 +712,7 @@
 
 // static
 TraceAnalyzer* TraceAnalyzer::Create(const std::string& json_events) {
-  scoped_ptr<TraceAnalyzer> analyzer(new TraceAnalyzer());
+  std::unique_ptr<TraceAnalyzer> analyzer(new TraceAnalyzer());
   if (analyzer->SetEvents(json_events))
     return analyzer.release();
   return NULL;
diff --git a/base/test/trace_event_analyzer.h b/base/test/trace_event_analyzer.h
index f67445a..0e2366b 100644
--- a/base/test/trace_event_analyzer.h
+++ b/base/test/trace_event_analyzer.h
@@ -111,6 +111,7 @@
   };
 
   TraceEvent();
+  TraceEvent(TraceEvent&& other);
   ~TraceEvent();
 
   bool SetFromJSON(const base::Value* event_value) WARN_UNUSED_RESULT;
@@ -119,6 +120,8 @@
     return timestamp < rhs.timestamp;
   }
 
+  TraceEvent& operator=(TraceEvent&& rhs);
+
   bool has_other_event() const { return other_event; }
 
   // Returns absolute duration in microseconds between this event and other
@@ -130,11 +133,16 @@
   bool GetArgAsString(const std::string& name, std::string* arg) const;
   // Return the argument value if it exists and it is a number.
   bool GetArgAsNumber(const std::string& name, double* arg) const;
+  // Return the argument value if it exists.
+  bool GetArgAsValue(const std::string& name,
+                     std::unique_ptr<base::Value>* arg) const;
 
   // Check if argument exists and is string.
   bool HasStringArg(const std::string& name) const;
   // Check if argument exists and is number (double, int or bool).
   bool HasNumberArg(const std::string& name) const;
+  // Check if argument exists.
+  bool HasArg(const std::string& name) const;
 
   // Get known existing arguments as specific types.
   // Useful when you have already queried the argument with
@@ -143,6 +151,8 @@
   double GetKnownArgAsDouble(const std::string& name) const;
   int GetKnownArgAsInt(const std::string& name) const;
   bool GetKnownArgAsBool(const std::string& name) const;
+  std::unique_ptr<base::Value> GetKnownArgAsValue(
+      const std::string& name) const;
 
   // Process ID and Thread ID.
   ProcessThreadID thread;
@@ -150,22 +160,17 @@
   // Time since epoch in microseconds.
   // Stored as double to match its JSON representation.
   double timestamp;
-
   double duration;
-
   char phase;
-
   std::string category;
-
   std::string name;
-
   std::string id;
 
   // All numbers and bool values from TraceEvent args are cast to double.
   // bool becomes 1.0 (true) or 0.0 (false).
   std::map<std::string, double> arg_numbers;
-
   std::map<std::string, std::string> arg_strings;
+  std::map<std::string, std::unique_ptr<base::Value>> arg_values;
 
   // The other event associated with this event (or NULL).
   const TraceEvent* other_event;
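
The analyzer now keeps every argument as a base::Value alongside the string/number maps,
so structured arguments (for example memory-dump dictionaries) can be inspected. A
consumption sketch (|event| is assumed to come from a TraceAnalyzer query; the "dumps"
argument name is illustrative):

  std::unique_ptr<base::Value> arg;
  if (event->HasArg("dumps") && event->GetArgAsValue("dumps", &arg)) {
    const base::DictionaryValue* dict = nullptr;
    if (arg->GetAsDictionary(&dict)) {
      // Walk the nested dictionary here.
    }
  }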
diff --git a/base/test/trace_event_analyzer_unittest.cc b/base/test/trace_event_analyzer_unittest.cc
index 700b920..e73dd65 100644
--- a/base/test/trace_event_analyzer_unittest.cc
+++ b/base/test/trace_event_analyzer_unittest.cc
@@ -2,14 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/test/trace_event_analyzer.h"
+
 #include <stddef.h>
 #include <stdint.h>
 
 #include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted_memory.h"
 #include "base/synchronization/waitable_event.h"
-#include "base/test/trace_event_analyzer.h"
 #include "base/threading/platform_thread.h"
 #include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event_argument.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -74,8 +78,8 @@
   buffer_.Start();
   buffer_.Finish();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
 
   // Search for all events and verify that nothing is returned.
@@ -97,6 +101,7 @@
   event.arg_numbers["int"] = static_cast<double>(int_num);
   event.arg_numbers["double"] = double_num;
   event.arg_strings["string"] = str;
+  event.arg_values["dict"] = WrapUnique(new base::DictionaryValue());
 
   ASSERT_TRUE(event.HasNumberArg("false"));
   ASSERT_TRUE(event.HasNumberArg("true"));
@@ -105,12 +110,18 @@
   ASSERT_TRUE(event.HasStringArg("string"));
   ASSERT_FALSE(event.HasNumberArg("notfound"));
   ASSERT_FALSE(event.HasStringArg("notfound"));
+  ASSERT_TRUE(event.HasArg("dict"));
+  ASSERT_FALSE(event.HasArg("notfound"));
 
   EXPECT_FALSE(event.GetKnownArgAsBool("false"));
   EXPECT_TRUE(event.GetKnownArgAsBool("true"));
   EXPECT_EQ(int_num, event.GetKnownArgAsInt("int"));
   EXPECT_EQ(double_num, event.GetKnownArgAsDouble("double"));
   EXPECT_STREQ(str, event.GetKnownArgAsString("string").c_str());
+
+  std::unique_ptr<base::Value> arg;
+  EXPECT_TRUE(event.GetArgAsValue("dict", &arg));
+  EXPECT_EQ(base::Value::TYPE_DICTIONARY, arg->GetType());
 }
 
 TEST_F(TraceEventAnalyzerTest, QueryEventMember) {
@@ -226,8 +237,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer);
   analyzer->SetIgnoreMetadataEvents(true);
 
@@ -317,8 +328,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
 
   TraceEventVector found;
@@ -372,8 +383,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->SetIgnoreMetadataEvents(true);
 
@@ -422,8 +433,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateBeginEndEvents();
 
@@ -464,8 +475,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateBeginEndEvents();
 
@@ -496,8 +507,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateBeginEndEvents();
 
@@ -519,8 +530,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateBeginEndEvents();
 
@@ -552,8 +563,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateAsyncBeginEndEvents();
 
@@ -584,8 +595,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
   analyzer->AssociateAsyncBeginEndEvents();
 
@@ -637,8 +648,8 @@
   }
   EndTracing();
 
-  scoped_ptr<TraceAnalyzer>
-      analyzer(TraceAnalyzer::Create(output_.json_output));
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
   ASSERT_TRUE(analyzer.get());
 
   // begin, end, and match queries to find proper begin/end pairs.
@@ -711,8 +722,7 @@
   std::vector<TraceEvent> events;
   events.reserve(100);
   TraceEventVector event_ptrs;
-  TraceEvent event;
-  event.timestamp = 0.0;
+  double timestamp = 0.0;
   double little_delta = 1.0;
   double big_delta = 10.0;
   double tiny_delta = 0.1;
@@ -721,8 +731,10 @@
 
   // Insert 10 events, each apart by little_delta.
   for (int i = 0; i < 10; ++i) {
-    event.timestamp += little_delta;
-    events.push_back(event);
+    timestamp += little_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
     event_ptrs.push_back(&events.back());
   }
 
@@ -733,9 +745,13 @@
   EXPECT_EQ(0.0, stats.standard_deviation_us);
 
   // Add an event apart by big_delta.
-  event.timestamp += big_delta;
-  events.push_back(event);
-  event_ptrs.push_back(&events.back());
+  {
+    timestamp += big_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
 
   ASSERT_TRUE(GetRateStats(event_ptrs, &stats, NULL));
   EXPECT_LT(little_delta, stats.mean_us);
@@ -753,9 +769,13 @@
   EXPECT_EQ(0.0, stats.standard_deviation_us);
 
   // Add an event apart by tiny_delta.
-  event.timestamp += tiny_delta;
-  events.push_back(event);
-  event_ptrs.push_back(&events.back());
+  {
+    timestamp += tiny_delta;
+    TraceEvent event;
+    event.timestamp = timestamp;
+    events.push_back(std::move(event));
+    event_ptrs.push_back(&events.back());
+  }
 
   // Trim off both the biggest and tiniest delta and verify stats.
   options.trim_min = 1;
@@ -767,17 +787,20 @@
   EXPECT_EQ(0.0, stats.standard_deviation_us);
 
   // Verify smallest allowed number of events.
-  TraceEventVector few_event_ptrs;
-  few_event_ptrs.push_back(&event);
-  few_event_ptrs.push_back(&event);
-  ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, NULL));
-  few_event_ptrs.push_back(&event);
-  ASSERT_TRUE(GetRateStats(few_event_ptrs, &stats, NULL));
+  {
+    TraceEvent event;
+    TraceEventVector few_event_ptrs;
+    few_event_ptrs.push_back(&event);
+    few_event_ptrs.push_back(&event);
+    ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, NULL));
+    few_event_ptrs.push_back(&event);
+    ASSERT_TRUE(GetRateStats(few_event_ptrs, &stats, NULL));
 
-  // Trim off more than allowed and verify failure.
-  options.trim_min = 0;
-  options.trim_max = 1;
-  ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, &options));
+    // Trim off more than allowed and verify failure.
+    options.trim_min = 0;
+    options.trim_max = 1;
+    ASSERT_FALSE(GetRateStats(few_event_ptrs, &stats, &options));
+  }
 }
 
 // Test FindFirstOf and FindLastOf.
@@ -894,5 +917,37 @@
   EXPECT_EQ(num_named, CountMatches(event_ptrs, query_named));
 }
 
+TEST_F(TraceEventAnalyzerTest, ComplexArgument) {
+  ManualSetUp();
+
+  BeginTracing();
+  {
+    std::unique_ptr<base::trace_event::TracedValue> value(
+        new base::trace_event::TracedValue);
+    value->SetString("property", "value");
+    TRACE_EVENT1("cat", "name", "arg", std::move(value));
+  }
+  EndTracing();
+
+  std::unique_ptr<TraceAnalyzer> analyzer(
+      TraceAnalyzer::Create(output_.json_output));
+  ASSERT_TRUE(analyzer.get());
+
+  TraceEventVector events;
+  analyzer->FindEvents(Query::EventName() == Query::String("name"), &events);
+
+  EXPECT_EQ(1u, events.size());
+  EXPECT_EQ("cat", events[0]->category);
+  EXPECT_EQ("name", events[0]->name);
+  EXPECT_TRUE(events[0]->HasArg("arg"));
+
+  std::unique_ptr<base::Value> arg;
+  events[0]->GetArgAsValue("arg", &arg);
+  base::DictionaryValue* arg_dict;
+  EXPECT_TRUE(arg->GetAsDictionary(&arg_dict));
+  std::string property;
+  EXPECT_TRUE(arg_dict->GetString("property", &property));
+  EXPECT_EQ("value", property);
+}
 
 }  // namespace trace_analyzer
diff --git a/base/third_party/dynamic_annotations/dynamic_annotations.h b/base/third_party/dynamic_annotations/dynamic_annotations.h
new file mode 100644
index 0000000..8d7f052
--- /dev/null
+++ b/base/third_party/dynamic_annotations/dynamic_annotations.h
@@ -0,0 +1,595 @@
+/* Copyright (c) 2011, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file defines dynamic annotations for use with dynamic analysis
+   tool such as valgrind, PIN, etc.
+
+   Dynamic annotation is a source code annotation that affects
+   the generated code (that is, the annotation is not a comment).
+   Each such annotation is attached to a particular
+   instruction and/or to a particular object (address) in the program.
+
+   The annotations that should be used by users are macros in all upper-case
+   (e.g., ANNOTATE_NEW_MEMORY).
+
+   Actual implementation of these macros may differ depending on the
+   dynamic analysis tool being used.
+
+   See http://code.google.com/p/data-race-test/  for more information.
+
+   This file supports the following dynamic analysis tools:
+   - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
+      Macros are defined empty.
+   - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
+      Macros are defined as calls to non-inlinable empty functions
+      that are intercepted by Valgrind. */
+
+#ifndef __DYNAMIC_ANNOTATIONS_H__
+#define __DYNAMIC_ANNOTATIONS_H__
+
+#ifndef DYNAMIC_ANNOTATIONS_PREFIX
+# define DYNAMIC_ANNOTATIONS_PREFIX
+#endif
+
+#ifndef DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND
+# define DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND 1
+#endif
+
+#ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK
+# ifdef __GNUC__
+#  define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak))
+# else
+/* TODO(glider): for Windows support we may want to change this macro in order
+   to prepend __declspec(selectany) to the annotations' declarations. */
+#  error weak annotations are not supported for your compiler
+# endif
+#else
+# define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
+#endif
+
+/* The following preprocessor magic prepends the value of
+   DYNAMIC_ANNOTATIONS_PREFIX to annotation function names. */
+#define DYNAMIC_ANNOTATIONS_GLUE0(A, B) A##B
+#define DYNAMIC_ANNOTATIONS_GLUE(A, B) DYNAMIC_ANNOTATIONS_GLUE0(A, B)
+#define DYNAMIC_ANNOTATIONS_NAME(name) \
+  DYNAMIC_ANNOTATIONS_GLUE(DYNAMIC_ANNOTATIONS_PREFIX, name)
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+# define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing condition variables such as CondVar,
+     using conditional critical sections (Await/LockWhen) and when constructing
+     user-defined synchronization mechanisms.
+
+     The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can
+     be used to define happens-before arcs in user-defined synchronization
+     mechanisms:  the race detector will infer an arc from the former to the
+     latter when they share the same argument pointer.
+
+     Example 1 (reference counting):
+
+     void Unref() {
+       ANNOTATE_HAPPENS_BEFORE(&refcount_);
+       if (AtomicDecrementByOne(&refcount_) == 0) {
+         ANNOTATE_HAPPENS_AFTER(&refcount_);
+         delete this;
+       }
+     }
+
+     Example 2 (message queue):
+
+     void MyQueue::Put(Type *e) {
+       MutexLock lock(&mu_);
+       ANNOTATE_HAPPENS_BEFORE(e);
+       PutElementIntoMyQueue(e);
+     }
+
+     Type *MyQueue::Get() {
+       MutexLock lock(&mu_);
+       Type *e = GetElementFromMyQueue();
+       ANNOTATE_HAPPENS_AFTER(e);
+       return e;
+     }
+
+     Note: when possible, please use the existing reference counting and message
+     queue implementations instead of inventing new ones. */
+
+  /* Report that wait on the condition variable at address "cv" has succeeded
+     and the lock at address "lock" is held. */
+  #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, lock)
+
+  /* Report that wait on the condition variable at "cv" has succeeded.  Variant
+     w/o lock. */
+  #define ANNOTATE_CONDVAR_WAIT(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(__FILE__, __LINE__, cv, NULL)
+
+  /* Report that we are about to signal on the condition variable at address
+     "cv". */
+  #define ANNOTATE_CONDVAR_SIGNAL(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(__FILE__, __LINE__, cv)
+
+  /* Report that we are about to signal_all on the condition variable at address
+     "cv". */
+  #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(__FILE__, __LINE__, cv)
+
+  /* Annotations for user-defined synchronization mechanisms. */
+  #define ANNOTATE_HAPPENS_BEFORE(obj) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(__FILE__, __LINE__, obj)
+  #define ANNOTATE_HAPPENS_AFTER(obj) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(__FILE__, __LINE__, obj)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(__FILE__, __LINE__, \
+        pointer, size)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(__FILE__, __LINE__, \
+        pointer, size)
+
+  /* DEPRECATED. Don't use it. */
+  #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size)   \
+    do {                                              \
+      ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \
+      ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size);   \
+    } while (0)
+
+  /* Instruct the tool to create a happens-before arc between mu->Unlock() and
+     mu->Lock(). This annotation may slow down the race detector and hide real
+     races. Normally it is used only when it would be difficult to annotate each
+     of the mutex's critical sections individually using the annotations above.
+     This annotation makes sense only for hybrid race detectors. For pure
+     happens-before detectors this is a no-op. For more details see
+     http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
+  #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+        mu)
+
+  /* Opposite to ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX.
+     Instruct the tool to NOT create h-b arcs between Unlock and Lock, even in
+     pure happens-before mode. For a hybrid mode this is a no-op. */
+  #define ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(__FILE__, __LINE__, mu)
+
+  /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
+  #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(__FILE__, __LINE__, \
+        mu)
+
+  /* -------------------------------------------------------------
+     Annotations useful when defining memory allocators, or when memory that
+     was protected in one way starts to be protected in another. */
+
+  /* Report that a new memory at "address" of size "size" has been allocated.
+     This might be used when the memory has been retrieved from a free list and
+     is about to be reused, or when the locking discipline for a variable
+     changes. */
+  #define ANNOTATE_NEW_MEMORY(address, size) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(__FILE__, __LINE__, address, \
+        size)
+
+  /* -------------------------------------------------------------
+     Annotations useful when defining FIFO queues that transfer data between
+     threads. */
+
+  /* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
+     address "pcq" has been created.  The ANNOTATE_PCQ_* annotations
+     should be used only for FIFO queues.  For non-FIFO queues use
+     ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
+  #define ANNOTATE_PCQ_CREATE(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(__FILE__, __LINE__, pcq)
+
+  /* Report that the queue at address "pcq" is about to be destroyed. */
+  #define ANNOTATE_PCQ_DESTROY(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(__FILE__, __LINE__, pcq)
+
+  /* Report that we are about to put an element into a FIFO queue at address
+     "pcq". */
+  #define ANNOTATE_PCQ_PUT(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(__FILE__, __LINE__, pcq)
+
+  /* Report that we've just got an element from a FIFO queue at address
+     "pcq". */
+  #define ANNOTATE_PCQ_GET(pcq) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(__FILE__, __LINE__, pcq)
+
+  /* -------------------------------------------------------------
+     Annotations that suppress errors.  It is usually better to express the
+     program's synchronization using the other annotations, but these can
+     be used when all else fails. */
+
+  /* Report that we may have a benign race at "pointer", with size
+     "sizeof(*(pointer))". "pointer" must be a non-void* pointer.  Insert at the
+     point where "pointer" has been allocated, preferably close to the point
+     where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC. */
+  #define ANNOTATE_BENIGN_RACE(pointer, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+        pointer, sizeof(*(pointer)), description)
+
+  /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
+     the memory range [address, address+size). */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(__FILE__, __LINE__, \
+        address, size, description)
+
+  /* Request the analysis tool to ignore all reads in the current thread
+     until ANNOTATE_IGNORE_READS_END is called.
+     Useful to ignore intentional racey reads, while still checking
+     other reads and all writes.
+     See also ANNOTATE_UNPROTECTED_READ. */
+  #define ANNOTATE_IGNORE_READS_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring reads. */
+  #define ANNOTATE_IGNORE_READS_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+  /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring writes. */
+  #define ANNOTATE_IGNORE_WRITES_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+  /* Start ignoring all memory accesses (reads and writes). */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+    do {\
+      ANNOTATE_IGNORE_READS_BEGIN();\
+      ANNOTATE_IGNORE_WRITES_BEGIN();\
+    }while(0)\
+
+  /* Stop ignoring all memory accesses. */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+    do {\
+      ANNOTATE_IGNORE_WRITES_END();\
+      ANNOTATE_IGNORE_READS_END();\
+    }while(0)\
+
+  /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events:
+     RWLOCK* and CONDVAR*. */
+  #define ANNOTATE_IGNORE_SYNC_BEGIN() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(__FILE__, __LINE__)
+
+  /* Stop ignoring sync events. */
+  #define ANNOTATE_IGNORE_SYNC_END() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(__FILE__, __LINE__)
+
+
+  /* Enable (enable!=0) or disable (enable==0) race detection for all threads.
+     This annotation could be useful if you want to skip expensive race analysis
+     during some period of program execution, e.g. during initialization. */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(__FILE__, __LINE__, \
+        enable)
+
+  /* -------------------------------------------------------------
+     Annotations useful for debugging. */
+
+  /* Request to trace every access to "address". */
+  #define ANNOTATE_TRACE_MEMORY(address) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(__FILE__, __LINE__, address)
+
+  /* Report the current thread name to a race detector. */
+  #define ANNOTATE_THREAD_NAME(name) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing locks.  They are not
+     normally needed by modules that merely use locks.
+     The "lock" argument is a pointer to the lock object. */
+
+  /* Report that a lock has been created at address "lock". */
+  #define ANNOTATE_RWLOCK_CREATE(lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+  /* Report that the lock at address "lock" is about to be destroyed. */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+  /* Report that the lock at address "lock" has been acquired.
+     is_w=1 for writer lock, is_w=0 for reader lock. */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(__FILE__, __LINE__, lock, \
+        is_w)
+
+  /* Report that the lock at address "lock" is about to be released. */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(__FILE__, __LINE__, lock, \
+        is_w)
+
+  /* -------------------------------------------------------------
+     Annotations useful when implementing barriers.  They are not
+     normally needed by modules that merely use barriers.
+     The "barrier" argument is a pointer to the barrier object. */
+
+  /* Report that the "barrier" has been initialized with initial "count".
+   If 'reinitialization_allowed' is true, initialization is allowed to happen
+   multiple times w/o calling barrier_destroy() */
+  #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(__FILE__, __LINE__, barrier, \
+        count, reinitialization_allowed)
+
+  /* Report that we are about to enter barrier_wait("barrier"). */
+  #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(__FILE__, __LINE__, \
+        barrier)
+
+  /* Report that we just exited barrier_wait("barrier"). */
+  #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(__FILE__, __LINE__, \
+        barrier)
+
+  /* Report that the "barrier" has been destroyed. */
+  #define ANNOTATE_BARRIER_DESTROY(barrier) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(__FILE__, __LINE__, \
+        barrier)
+
+  /* -------------------------------------------------------------
+     Annotations useful for testing race detectors. */
+
+  /* Report that we expect a race on the variable at "address".
+     Use only in unit tests for a race detector. */
+  #define ANNOTATE_EXPECT_RACE(address, description) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(__FILE__, __LINE__, address, \
+        description)
+
+  #define ANNOTATE_FLUSH_EXPECTED_RACES() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(__FILE__, __LINE__)
+
+  /* A no-op. Insert where you like to test the interceptors. */
+  #define ANNOTATE_NO_OP(arg) \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(__FILE__, __LINE__, arg)
+
+  /* Force the race detector to flush its state. The actual effect depends on
+   * the implementation of the detector. */
+  #define ANNOTATE_FLUSH_STATE() \
+    DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(__FILE__, __LINE__)
+
+
+#else  /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+  #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */
+  #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
+  #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
+  #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
+  #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
+  #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
+  #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
+  #define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
+  #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
+  #define ANNOTATE_CONDVAR_WAIT(cv) /* empty */
+  #define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
+  #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
+  #define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
+  #define ANNOTATE_HAPPENS_AFTER(obj) /* empty */
+  #define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
+  #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size)  /* empty */
+  #define ANNOTATE_SWAP_MEMORY_RANGE(address, size)  /* empty */
+  #define ANNOTATE_PCQ_CREATE(pcq) /* empty */
+  #define ANNOTATE_PCQ_DESTROY(pcq) /* empty */
+  #define ANNOTATE_PCQ_PUT(pcq) /* empty */
+  #define ANNOTATE_PCQ_GET(pcq) /* empty */
+  #define ANNOTATE_NEW_MEMORY(address, size) /* empty */
+  #define ANNOTATE_EXPECT_RACE(address, description) /* empty */
+  #define ANNOTATE_FLUSH_EXPECTED_RACES(address, description) /* empty */
+  #define ANNOTATE_BENIGN_RACE(address, description) /* empty */
+  #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
+  #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
+  #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
+  #define ANNOTATE_TRACE_MEMORY(arg) /* empty */
+  #define ANNOTATE_THREAD_NAME(name) /* empty */
+  #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_READS_END() /* empty */
+  #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_WRITES_END() /* empty */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
+  #define ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */
+  #define ANNOTATE_IGNORE_SYNC_END() /* empty */
+  #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
+  #define ANNOTATE_NO_OP(arg) /* empty */
+  #define ANNOTATE_FLUSH_STATE() /* empty */
+
+#endif  /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+/* Use the macros above rather than using these functions directly. */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockCreate)(
+    const char *file, int line,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockDestroy)(
+    const char *file, int line,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockAcquired)(
+    const char *file, int line,
+    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateRWLockReleased)(
+    const char *file, int line,
+    const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierInit)(
+    const char *file, int line, const volatile void *barrier, long count,
+    long reinitialization_allowed) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitBefore)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierWaitAfter)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBarrierDestroy)(
+    const char *file, int line,
+    const volatile void *barrier) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarWait)(
+    const char *file, int line, const volatile void *cv,
+    const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignal)(
+    const char *file, int line,
+    const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateCondVarSignalAll)(
+    const char *file, int line,
+    const volatile void *cv) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensBefore)(
+    const char *file, int line,
+    const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateHappensAfter)(
+    const char *file, int line,
+    const volatile void *obj) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePublishMemoryRange)(
+    const char *file, int line,
+    const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateUnpublishMemoryRange)(
+    const char *file, int line,
+    const volatile void *address, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQCreate)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQDestroy)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQPut)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotatePCQGet)(
+    const char *file, int line,
+    const volatile void *pcq) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNewMemory)(
+    const char *file, int line,
+    const volatile void *mem, long size) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateExpectRace)(
+    const char *file, int line, const volatile void *mem,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushExpectedRaces)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRace)(
+    const char *file, int line, const volatile void *mem,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateBenignRaceSized)(
+    const char *file, int line, const volatile void *mem, long size,
+    const char *description) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsUsedAsCondVar)(
+    const char *file, int line,
+    const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateMutexIsNotPHB)(
+    const char *file, int line,
+    const volatile void *mu) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateTraceMemory)(
+    const char *file, int line,
+    const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateThreadName)(
+    const char *file, int line,
+    const char *name) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreReadsEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreWritesEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncBegin)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateIgnoreSyncEnd)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateEnableRaceDetection)(
+    const char *file, int line, int enable) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateNoOp)(
+    const char *file, int line,
+    const volatile void *arg) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+void DYNAMIC_ANNOTATIONS_NAME(AnnotateFlushState)(
+    const char *file, int line) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+
+#if DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1
+/* Return non-zero value if running under valgrind.
+
+  If "valgrind.h" is included into dynamic_annotations.c,
+  the regular valgrind mechanism will be used.
+  See http://valgrind.org/docs/manual/manual-core-adv.html about
+  RUNNING_ON_VALGRIND and other valgrind "client requests".
+  The file "valgrind.h" may be obtained by doing
+     svn co svn://svn.valgrind.org/valgrind/trunk/include
+
+  If for some reason you can't use "valgrind.h" or want to fake valgrind,
+  there are two ways to make this function return non-zero:
+    - Use environment variable: export RUNNING_ON_VALGRIND=1
+    - Make your tool intercept the function RunningOnValgrind() and
+      change its return value.
+ */
+int RunningOnValgrind(void) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
+#endif /* DYNAMIC_ANNOTATIONS_PROVIDE_RUNNING_ON_VALGRIND == 1 */
+
+#ifdef __cplusplus
+}
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
+
+  /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+
+     Instead of doing
+        ANNOTATE_IGNORE_READS_BEGIN();
+        ... = x;
+        ANNOTATE_IGNORE_READS_END();
+     one can use
+        ... = ANNOTATE_UNPROTECTED_READ(x); */
+  template <class T>
+  inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) {
+    ANNOTATE_IGNORE_READS_BEGIN();
+    T res = x;
+    ANNOTATE_IGNORE_READS_END();
+    return res;
+  }
+  /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)        \
+    namespace {                                                       \
+      class static_var ## _annotator {                                \
+       public:                                                        \
+        static_var ## _annotator() {                                  \
+          ANNOTATE_BENIGN_RACE_SIZED(&static_var,                     \
+                                      sizeof(static_var),             \
+            # static_var ": " description);                           \
+        }                                                             \
+      };                                                              \
+      static static_var ## _annotator the ## static_var ## _annotator;\
+    }
+#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
+
+  #define ANNOTATE_UNPROTECTED_READ(x) (x)
+  #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)  /* empty */
+
+#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
+
+#endif  /* __DYNAMIC_ANNOTATIONS_H__ */
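
A hedged usage sketch, not part of the imported header, applying the
benign-race annotations described above to a deliberately unsynchronized
counter (all names here are illustrative):

  #include "base/third_party/dynamic_annotations/dynamic_annotations.h"

  // A statistics counter that is intentionally read without locking.
  static int g_hit_count = 0;
  ANNOTATE_BENIGN_RACE_STATIC(g_hit_count, "stats counter, racy by design")

  void RecordHit() {
    ++g_hit_count;  // Written from several threads; the race is tolerated.
  }

  int ApproximateHitCount() {
    // Racy read; the annotation keeps the race detector from flagging it.
    return ANNOTATE_UNPROTECTED_READ(g_hit_count);
  }
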
diff --git a/base/third_party/libevent/event.h b/base/third_party/libevent/event.h
new file mode 100644
index 0000000..d47d797
--- /dev/null
+++ b/base/third_party/libevent/event.h
@@ -0,0 +1,10 @@
+// The Chromium build contains its own checkout of libevent. This stub is used
+// when building the Chrome OS or Android libchrome package to instead use the
+// system headers.
+#if defined(__ANDROID__) || defined(__ANDROID_HOST__)
+#include <event2/event.h>
+#include <event2/event_compat.h>
+#include <event2/event_struct.h>
+#else
+#include <event.h>
+#endif
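
A minimal consumer sketch (hypothetical, not part of this patch): code that
includes the stub path above picks up the system event2 headers on Android
and the classic event.h elsewhere, assuming a libevent new enough to provide
event_base_new():

  #include "base/third_party/libevent/event.h"

  bool CanCreateEventBase() {
    struct event_base* base = event_base_new();  // From the system libevent.
    if (!base)
      return false;
    event_base_free(base);
    return true;
  }
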
diff --git a/base/third_party/nspr/prtime.cc b/base/third_party/nspr/prtime.cc
index 6c07f0d..97d2c27 100644
--- a/base/third_party/nspr/prtime.cc
+++ b/base/third_party/nspr/prtime.cc
@@ -379,7 +379,7 @@
  */
 
 PRTimeParameters
-PR_GMTParameters(const PRExplodedTime* /* gmt */)
+PR_GMTParameters(const PRExplodedTime* /*gmt*/)
 {
     PRTimeParameters retVal = { 0, 0 };
     return retVal;
diff --git a/base/third_party/valgrind/memcheck.h b/base/third_party/valgrind/memcheck.h
new file mode 100644
index 0000000..aac34fc
--- /dev/null
+++ b/base/third_party/valgrind/memcheck.h
@@ -0,0 +1,282 @@
+#ifdef ANDROID
+  #include "memcheck/memcheck.h"
+#else
+/*
+   ----------------------------------------------------------------
+
+   Notice that the following BSD-style license applies to this one
+   file (memcheck.h) only.  The rest of Valgrind is licensed under the
+   terms of the GNU General Public License, version 2, unless
+   otherwise indicated.  See the COPYING file in the source
+   distribution for details.
+
+   ----------------------------------------------------------------
+
+   This file is part of MemCheck, a heavyweight Valgrind tool for
+   detecting memory errors.
+
+   Copyright (C) 2000-2010 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must 
+      not claim that you wrote the original software.  If you use this 
+      software in a product, an acknowledgment in the product 
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote 
+      products derived from this software without specific prior written 
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   ----------------------------------------------------------------
+
+   Notice that the above BSD-style license applies to this one file
+   (memcheck.h) only.  The entire rest of Valgrind is licensed under
+   the terms of the GNU General Public License, version 2.  See the
+   COPYING file in the source distribution for details.
+
+   ---------------------------------------------------------------- 
+*/
+
+
+#ifndef __MEMCHECK_H
+#define __MEMCHECK_H
+
+
+/* This file is for inclusion into client (your!) code.
+
+   You can use these macros to manipulate and query memory permissions
+   inside your own programs.
+
+   See comment near the top of valgrind.h on how to use them.
+*/
+
+#include "valgrind.h"
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+   enum { 
+      VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
+      VG_USERREQ__MAKE_MEM_UNDEFINED,
+      VG_USERREQ__MAKE_MEM_DEFINED,
+      VG_USERREQ__DISCARD,
+      VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
+      VG_USERREQ__CHECK_MEM_IS_DEFINED,
+      VG_USERREQ__DO_LEAK_CHECK,
+      VG_USERREQ__COUNT_LEAKS,
+
+      VG_USERREQ__GET_VBITS,
+      VG_USERREQ__SET_VBITS,
+
+      VG_USERREQ__CREATE_BLOCK,
+
+      VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
+
+      /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
+      VG_USERREQ__COUNT_LEAK_BLOCKS,
+
+      /* This is just for memcheck's internal use - don't use it */
+      _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR 
+         = VG_USERREQ_TOOL_BASE('M','C') + 256
+   } Vg_MemCheckClientRequest;
+
+
+
+/* Client-code macros to manipulate the state of memory. */
+
+/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len)           \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_NOACCESS,       \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+      
+/* Similarly, mark memory at _qzz_addr as addressable but undefined
+   for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len)          \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_UNDEFINED,      \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similarly, mark memory at _qzz_addr as addressable and defined
+   for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len)            \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__MAKE_MEM_DEFINED,        \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
+   not altered: bytes which are addressable are marked as defined,
+   but those which are not addressable are left unchanged. */
+#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len)     \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,              \
+                            VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Create a block-description handle.  The description is an ascii
+   string which is included in any messages pertaining to addresses
+   within the specified memory range.  Has no other effect on the
+   properties of the memory range. */
+#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc)	   \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,        \
+                            VG_USERREQ__CREATE_BLOCK,              \
+                            (_qzz_addr), (_qzz_len), (_qzz_desc),  \
+                            0, 0)
+
+/* Discard a block-description-handle. Returns 1 for an
+   invalid handle, 0 for a valid handle. */
+#define VALGRIND_DISCARD(_qzz_blkindex)                          \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
+                            VG_USERREQ__DISCARD,                 \
+                            0, (_qzz_blkindex), 0, 0, 0)
+
+
+/* Client-code macros to check the state of memory. */
+
+/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
+   If suitable addressibility is not established, Valgrind prints an
+   error message and returns the address of the first offending byte.
+   Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len)      \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                             \
+                            VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,  \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Check that memory at _qzz_addr is addressable and defined for
+   _qzz_len bytes.  If suitable addressibility and definedness are not
+   established, Valgrind prints an error message and returns the
+   address of the first offending byte.  Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len)        \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                           \
+                            VG_USERREQ__CHECK_MEM_IS_DEFINED,    \
+                            (_qzz_addr), (_qzz_len), 0, 0, 0)
+
+/* Use this macro to force the definedness and addressibility of an
+   lvalue to be checked.  If suitable addressibility and definedness
+   are not established, Valgrind prints an error message and returns
+   the address of the first offending byte.  Otherwise it returns
+   zero. */
+#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue)                \
+   VALGRIND_CHECK_MEM_IS_DEFINED(                                \
+      (volatile unsigned char *)&(__lvalue),                     \
+                      (unsigned long)(sizeof (__lvalue)))
+
+
+/* Do a full memory leak check (like --leak-check=full) mid-execution. */
+#define VALGRIND_DO_LEAK_CHECK                                   \
+   {unsigned long _qzz_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__DO_LEAK_CHECK,           \
+                            0, 0, 0, 0, 0);                      \
+   }
+
+/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
+#define VALGRIND_DO_QUICK_LEAK_CHECK				 \
+   {unsigned long _qzz_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__DO_LEAK_CHECK,           \
+                            1, 0, 0, 0, 0);                      \
+   }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+   all previous leak checks.  They must be lvalues.  */
+#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed)     \
+   /* For safety on 64-bit platforms we assign the results to private
+      unsigned long variables, then assign these to the lvalues the user
+      specified, which works no matter what type 'leaked', 'dubious', etc
+      are.  We also initialise '_qzz_leaked', etc because
+      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+      defined. */                                                        \
+   {unsigned long _qzz_res;                                              \
+    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
+    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                              \
+                               VG_USERREQ__COUNT_LEAKS,                  \
+                               &_qzz_leaked, &_qzz_dubious,              \
+                               &_qzz_reachable, &_qzz_suppressed, 0);    \
+    leaked     = _qzz_leaked;                                            \
+    dubious    = _qzz_dubious;                                           \
+    reachable  = _qzz_reachable;                                         \
+    suppressed = _qzz_suppressed;                                        \
+   }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+   all previous leak checks.  They must be lvalues.  */
+#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
+   /* For safety on 64-bit platforms we assign the results to private
+      unsigned long variables, then assign these to the lvalues the user
+      specified, which works no matter what type 'leaked', 'dubious', etc
+      are.  We also initialise '_qzz_leaked', etc because
+      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+      defined. */                                                        \
+   {unsigned long _qzz_res;                                              \
+    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
+    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                              \
+                               VG_USERREQ__COUNT_LEAK_BLOCKS,            \
+                               &_qzz_leaked, &_qzz_dubious,              \
+                               &_qzz_reachable, &_qzz_suppressed, 0);    \
+    leaked     = _qzz_leaked;                                            \
+    dubious    = _qzz_dubious;                                           \
+    reachable  = _qzz_reachable;                                         \
+    suppressed = _qzz_suppressed;                                        \
+   }
+
+
+/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
+   into the provided zzvbits array.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zzsrc/zzvbits are not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes)                \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                          \
+                                    VG_USERREQ__GET_VBITS,      \
+                                    (const char*)(zza),         \
+                                    (char*)(zzvbits),           \
+                                    (zznbytes), 0, 0)
+
+/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
+   from the provided zzvbits array.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zza/zzvbits are not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes)                \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                          \
+                                    VG_USERREQ__SET_VBITS,      \
+                                    (const char*)(zza),         \
+                                    (const char*)(zzvbits),     \
+                                    (zznbytes), 0, 0 )
+
+#endif
+
+#endif
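
A hedged sketch, not part of the imported header, of the free-list pattern
the MemCheck comments above describe: parked blocks are poisoned so stray
accesses are reported, then marked undefined when handed back out (the class
and method names are illustrative, and the usual client-request plumbing from
valgrind.h is assumed):

  #include <cstddef>
  #include <vector>

  #include "base/third_party/valgrind/memcheck.h"

  class PoisoningFreeList {
   public:
    void Park(void* block, size_t size) {
      // The block is logically dead while it sits in the list.
      VALGRIND_MAKE_MEM_NOACCESS(block, size);
      blocks_.push_back(block);
      sizes_.push_back(size);
    }

    void* Reuse() {
      if (blocks_.empty())
        return nullptr;
      void* block = blocks_.back();
      size_t size = sizes_.back();
      blocks_.pop_back();
      sizes_.pop_back();
      // Addressable again, but contents count as undefined until rewritten.
      VALGRIND_MAKE_MEM_UNDEFINED(block, size);
      return block;
    }

   private:
    std::vector<void*> blocks_;
    std::vector<size_t> sizes_;
  };
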
diff --git a/base/third_party/valgrind/valgrind.h b/base/third_party/valgrind/valgrind.h
new file mode 100644
index 0000000..0668a71
--- /dev/null
+++ b/base/third_party/valgrind/valgrind.h
@@ -0,0 +1,4797 @@
+#ifdef ANDROID
+  #include "include/valgrind.h"
+#else
+/* -*- c -*-
+   ----------------------------------------------------------------
+
+   Notice that the following BSD-style license applies to this one
+   file (valgrind.h) only.  The rest of Valgrind is licensed under the
+   terms of the GNU General Public License, version 2, unless
+   otherwise indicated.  See the COPYING file in the source
+   distribution for details.
+
+   ----------------------------------------------------------------
+
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2000-2010 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must 
+      not claim that you wrote the original software.  If you use this 
+      software in a product, an acknowledgment in the product 
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote 
+      products derived from this software without specific prior written 
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   ----------------------------------------------------------------
+
+   Notice that the above BSD-style license applies to this one file
+   (valgrind.h) only.  The entire rest of Valgrind is licensed under
+   the terms of the GNU General Public License, version 2.  See the
+   COPYING file in the source distribution for details.
+
+   ---------------------------------------------------------------- 
+*/
+
+
+/* This file is for inclusion into client (your!) code.
+
+   You can use these macros to manipulate and query Valgrind's 
+   execution inside your own programs.
+
+   The resulting executables will still run without Valgrind, just a
+   little bit more slowly than they otherwise would, but otherwise
+   unchanged.  When not running on valgrind, each client request
+   consumes very few (eg. 7) instructions, so the resulting performance
+   loss is negligible unless you plan to execute client requests
+   millions of times per second.  Nevertheless, if that is still a
+   problem, you can compile with the NVALGRIND symbol defined (gcc
+   -DNVALGRIND) so that client requests are not even compiled in.  */
+
+#ifndef __VALGRIND_H
+#define __VALGRIND_H
+
+
+/* ------------------------------------------------------------------ */
+/* VERSION NUMBER OF VALGRIND                                         */
+/* ------------------------------------------------------------------ */
+
+/* Specify Valgrind's version number, so that user code can
+   conditionally compile based on our version number.  Note that these
+   were introduced at version 3.6 and so do not exist in version 3.5
+   or earlier.  The recommended way to use them to check for "version
+   X.Y or later" is (eg)
+
+#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__)   \
+    && (__VALGRIND_MAJOR__ > 3                                   \
+        || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
+*/
+#define __VALGRIND_MAJOR__    3
+#define __VALGRIND_MINOR__    6
+
+
+#include <stdarg.h>
+
+/* Nb: this file might be included in a file compiled with -ansi.  So
+   we can't use C++ style "//" comments nor the "asm" keyword (instead
+   use "__asm__"). */
+
+/* Derive some tags indicating what the target platform is.  Note
+   that in this file we're using the compiler's CPP symbols for
+   identifying architectures, which are different to the ones we use
+   within the rest of Valgrind.  Note, __powerpc__ is active for both
+   32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+   latter (on Linux, that is).
+
+   Misc note: how to find out what's predefined in gcc by default:
+   gcc -Wp,-dM somefile.c
+*/
+#undef PLAT_ppc64_aix5
+#undef PLAT_ppc32_aix5
+#undef PLAT_x86_darwin
+#undef PLAT_amd64_darwin
+#undef PLAT_x86_win32
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+
+#if defined(_AIX) && defined(__64BIT__)
+#  define PLAT_ppc64_aix5 1
+#elif defined(_AIX) && !defined(__64BIT__)
+#  define PLAT_ppc32_aix5 1
+#elif defined(__APPLE__) && defined(__i386__)
+#  define PLAT_x86_darwin 1
+#elif defined(__APPLE__) && defined(__x86_64__)
+#  define PLAT_amd64_darwin 1
+#elif defined(__MINGW32__) || defined(__CYGWIN32__) \
+      || (defined(_WIN32) && defined(_M_IX86))
+#  define PLAT_x86_win32 1
+#elif defined(__linux__) && defined(__i386__)
+#  define PLAT_x86_linux 1
+#elif defined(__linux__) && defined(__x86_64__)
+#  define PLAT_amd64_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
+#  define PLAT_ppc32_linux 1
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
+#  define PLAT_ppc64_linux 1
+#elif defined(__linux__) && defined(__arm__)
+#  define PLAT_arm_linux 1
+#else
+/* If we're not compiling for our target platform, don't generate
+   any inline asms.  */
+#  if !defined(NVALGRIND)
+#    define NVALGRIND 1
+#  endif
+#endif
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS.  There is nothing */
+/* in here of use to end-users -- skip to the next section.           */
+/* ------------------------------------------------------------------ */
+
+#if defined(NVALGRIND)
+
+/* Define NVALGRIND to completely remove the Valgrind magic sequence
+   from the compiled code (analogous to NDEBUG's effects on
+   assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+   {                                                              \
+      (_zzq_rlval) = (_zzq_default);                              \
+   }
+
+#else  /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+   spots and handles magically.  Don't look too closely at them as
+   they will rot your brain.
+
+   The assembly code sequences for all architectures are in this one
+   file.  This is because this file must be stand-alone, and we don't
+   want to have multiple files.
+
+   For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+   value gets put in the return slot, so that everything works when
+   this is executed not under Valgrind.  Args are passed in a memory
+   block, and so there's no intrinsic limit to the number that could
+   be passed, but it's currently five.
+   
+   The macro args are: 
+      _zzq_rlval    result lvalue
+      _zzq_default  default value (result returned when running on real CPU)
+      _zzq_request  request code
+      _zzq_arg1..5  request params
+
+   The other two macros are used to support function wrapping, and are
+   a lot simpler.  VALGRIND_GET_NR_CONTEXT returns the value of the
+   guest's NRADDR pseudo-register and whatever other information is
+   needed to safely run the original call from the wrapper: on
+   ppc64-linux, the R2 value at the divert point is also needed.  This
+   information is abstracted into a user-visible type, OrigFn.
+
+   VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
+   guest, but guarantees that the branch instruction will not be
+   redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
+   branch-and-link-to-r11.  VALGRIND_CALL_NOREDIR_* is just text, not a
+   complete inline asm, since it needs to be combined with more magic
+   inline asm stuff to be useful.
+*/
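+
+/* A hedged, non-compiled sketch (not part of this header) of issuing
+   a client request directly.  MY_TOOL_REQUEST_CODE is a hypothetical
+   request code invented for this illustration; real codes live
+   further down in this header and in the tool-specific headers such
+   as memcheck.h.  Outside Valgrind, 'res' just receives the default
+   value (0 here). */
+#if 0
+#define MY_TOOL_REQUEST_CODE 0x1234
+static unsigned int issue_request(unsigned int arg)
+{
+   unsigned int res;
+   VALGRIND_DO_CLIENT_REQUEST(res, 0 /* default */,
+                              MY_TOOL_REQUEST_CODE,
+                              arg, 0, 0, 0, 0);
+   return res;
+}
+#endif  /* 0 -- illustrative sketch only */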
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux)  ||  defined(PLAT_x86_darwin)  \
+    ||  (defined(PLAT_x86_win32) && defined(__GNUC__))
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "roll $3,  %%edi ; roll $13, %%edi\n\t"      \
+                     "roll $29, %%edi ; roll $19, %%edi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile unsigned int _zzq_args[6];                           \
+    volatile unsigned int _zzq_result;                            \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EDX = client_request ( %EAX ) */         \
+                     "xchgl %%ebx,%%ebx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %EAX = guest_NRADDR */                    \
+                     "xchgl %%ecx,%%ecx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%EAX */                     \
+                     "xchgl %%edx,%%edx\n\t"
+#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
+
+/* ------------------------- x86-Win32 ------------------------- */
+
+#if defined(PLAT_x86_win32) && !defined(__GNUC__)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#if defined(_MSC_VER)
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     __asm rol edi, 3  __asm rol edi, 13          \
+                     __asm rol edi, 29 __asm rol edi, 19
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile uintptr_t _zzq_args[6];                              \
+    volatile unsigned int _zzq_result;                            \
+    _zzq_args[0] = (uintptr_t)(_zzq_request);                     \
+    _zzq_args[1] = (uintptr_t)(_zzq_arg1);                        \
+    _zzq_args[2] = (uintptr_t)(_zzq_arg2);                        \
+    _zzq_args[3] = (uintptr_t)(_zzq_arg3);                        \
+    _zzq_args[4] = (uintptr_t)(_zzq_arg4);                        \
+    _zzq_args[5] = (uintptr_t)(_zzq_arg5);                        \
+    __asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default  \
+            __SPECIAL_INSTRUCTION_PREAMBLE                        \
+            /* %EDX = client_request ( %EAX ) */                  \
+            __asm xchg ebx,ebx                                    \
+            __asm mov _zzq_result, edx                            \
+    }                                                             \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned int __addr;                                 \
+    __asm { __SPECIAL_INSTRUCTION_PREAMBLE                        \
+            /* %EAX = guest_NRADDR */                             \
+            __asm xchg ecx,ecx                                    \
+            __asm mov __addr, eax                                 \
+    }                                                             \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_EAX ERROR
+
+#else
+#error Unsupported compiler.
+#endif
+
+#endif /* PLAT_x86_win32 */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux)  ||  defined(PLAT_amd64_darwin)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rolq $3,  %%rdi ; rolq $13, %%rdi\n\t"      \
+                     "rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+  { volatile unsigned long long int _zzq_args[6];                 \
+    volatile unsigned long long int _zzq_result;                  \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RDX = client_request ( %RAX ) */         \
+                     "xchgq %%rbx,%%rbx"                          \
+                     : "=d" (_zzq_result)                         \
+                     : "a" (&_zzq_args[0]), "0" (_zzq_default)    \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    volatile unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %RAX = guest_NRADDR */                    \
+                     "xchgq %%rcx,%%rcx"                          \
+                     : "=a" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_CALL_NOREDIR_RAX                                 \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* call-noredir *%RAX */                     \
+                     "xchgq %%rdx,%%rdx\n\t"
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned int  _zzq_args[6];                          \
+             unsigned int  _zzq_result;                           \
+             unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 3,%1\n\t" /*default*/                    \
+                     "mr 4,%2\n\t" /*ptr*/                        \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"     /*result*/                     \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_default), "b" (_zzq_ptr)         \
+                     : "cc", "memory", "r3", "r4");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    unsigned int __addr;                                          \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory", "r3"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned long long int  _zzq_args[6];                \
+    register unsigned long long int  _zzq_result __asm__("r3");   \
+    register unsigned long long int* _zzq_ptr __asm__("r4");      \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1"                                   \
+                     : "=r" (_zzq_result)                         \
+                     : "0" (_zzq_default), "r" (_zzq_ptr)         \
+                     : "cc", "memory");                           \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr __asm__("r3");         \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4"                                   \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory"                             \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+            "mov r12, r12, ror #3  ; mov r12, r12, ror #13 \n\t"  \
+            "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  { volatile unsigned int  _zzq_args[6];                          \
+    volatile unsigned int  _zzq_result;                           \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    __asm__ volatile("mov r3, %1\n\t" /*default*/                 \
+                     "mov r4, %2\n\t" /*ptr*/                     \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* R3 = client_request ( R4 ) */             \
+                     "orr r10, r10, r10\n\t"                      \
+                     "mov %0, r3"     /*result*/                  \
+                     : "=r" (_zzq_result)                         \
+                     : "r" (_zzq_default), "r" (&_zzq_args[0])    \
+                     : "cc","memory", "r3", "r4");                \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    unsigned int __addr;                                          \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* R3 = guest_NRADDR */                      \
+                     "orr r11, r11, r11\n\t"                      \
+                     "mov %0, r3"                                 \
+                     : "=r" (__addr)                              \
+                     :                                            \
+                     : "cc", "memory", "r3"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                    \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R4 */        \
+                     "orr r12, r12, r12\n\t"
+
+#endif /* PLAT_arm_linux */
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+typedef
+   struct { 
+      unsigned int nraddr; /* where's the code? */
+      unsigned int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rlwinm 0,0,3,0,0  ; rlwinm 0,0,13,0,0\n\t"  \
+                     "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned int  _zzq_args[7];                          \
+    register unsigned int  _zzq_result;                           \
+    register unsigned int* _zzq_ptr;                              \
+    _zzq_args[0] = (unsigned int)(_zzq_request);                  \
+    _zzq_args[1] = (unsigned int)(_zzq_arg1);                     \
+    _zzq_args[2] = (unsigned int)(_zzq_arg2);                     \
+    _zzq_args[3] = (unsigned int)(_zzq_arg3);                     \
+    _zzq_args[4] = (unsigned int)(_zzq_arg4);                     \
+    _zzq_args[5] = (unsigned int)(_zzq_arg5);                     \
+    _zzq_args[6] = (unsigned int)(_zzq_default);                  \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "lwz 3, 24(4)\n\t"                           \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned int __addr;                                 \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+typedef
+   struct { 
+      unsigned long long int nraddr; /* where's the code? */
+      unsigned long long int r2;  /* what tocptr do we need? */
+   }
+   OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE                            \
+                     "rotldi 0,0,3  ; rotldi 0,0,13\n\t"          \
+                     "rotldi 0,0,61 ; rotldi 0,0,51\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST(                               \
+        _zzq_rlval, _zzq_default, _zzq_request,                   \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
+                                                                  \
+  {          unsigned long long int  _zzq_args[7];                \
+    register unsigned long long int  _zzq_result;                 \
+    register unsigned long long int* _zzq_ptr;                    \
+    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
+    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
+    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
+    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
+    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
+    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+    _zzq_args[6] = (unsigned long long int)(_zzq_default);        \
+    _zzq_ptr = _zzq_args;                                         \
+    __asm__ volatile("mr 4,%1\n\t"                                \
+                     "ld 3, 48(4)\n\t"                            \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = client_request ( %R4 ) */           \
+                     "or 1,1,1\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (_zzq_result)                         \
+                     : "b" (_zzq_ptr)                             \
+                     : "r3", "r4", "cc", "memory");               \
+    _zzq_rlval = _zzq_result;                                     \
+  }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
+  { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
+    register unsigned long long int __addr;                       \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR */                     \
+                     "or 2,2,2\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->nraddr = __addr;                                   \
+    __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* %R3 = guest_NRADDR_GPR2 */                \
+                     "or 4,4,4\n\t"                               \
+                     "mr %0,3"                                    \
+                     : "=b" (__addr)                              \
+                     :                                            \
+                     : "r3", "cc", "memory"                       \
+                    );                                            \
+    _zzq_orig->r2 = __addr;                                       \
+  }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                   \
+                     __SPECIAL_INSTRUCTION_PREAMBLE               \
+                     /* branch-and-link-to-noredir *%R11 */       \
+                     "or 3,3,3\n\t"
+
+#endif /* PLAT_ppc64_aix5 */
+
+/* Insert assembly code for other platforms here... */
+
+#endif /* NVALGRIND */
+
+
+/* ------------------------------------------------------------------ */
+/* PLATFORM SPECIFICS for FUNCTION WRAPPING.  This is all very        */
+/* ugly.  It's the least-worst tradeoff I can think of.               */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a. appalling-hack) macros for doing
+   guaranteed-no-redirection calls, so as to get from function
+   wrappers to the functions they are wrapping.  The whole point is to
+   construct standard call sequences, but to do the call itself with a
+   special no-redirect call pseudo-instruction that the JIT
+   understands and handles specially.  This section is long and
+   repetitious, and I can't see a way to make it shorter.
+
+   The naming scheme is as follows:
+
+      CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+   'W' stands for "word" and 'v' for "void".  Hence there are
+   different macros for calling arity 0, 1, 2, 3, 4, etc. functions,
+   and for each, the possibility of returning a word-typed result, or
+   no result.
+*/
+
+/* Use these to write the name of your wrapper.  NOTE: duplicates
+   VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+/* Use an extra level of macroisation so as to ensure the soname/fnname
+   args are fully macro-expanded before pasting them together. */
+#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname)                    \
+   VG_CONCAT4(_vgwZU_,soname,_,fnname)
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname)                    \
+   VG_CONCAT4(_vgwZZ_,soname,_,fnname)
+
+/* Use this macro from within a wrapper function to collect the
+   context (address and possibly other info) of the original function.
+   Once you have that you can then use it in one of the CALL_FN_
+   macros.  The type of the argument _lval is OrigFn. */
+#define VALGRIND_GET_ORIG_FN(_lval)  VALGRIND_GET_NR_CONTEXT(_lval)
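+
+/* A hedged, non-compiled sketch (not part of this header) of a
+   complete wrapper, modelled on the example in the Valgrind manual.
+   'foo' is a hypothetical arity-2 function in the main executable;
+   the soname tag NONE is the manual's convention for functions in
+   the executable itself.  CALL_FN_W_WW is defined per-platform
+   below. */
+#if 0
+#include <stdio.h>
+int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
+{
+   int    result;
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   printf("foo's wrapper: args %d %d\n", x, y);
+   CALL_FN_W_WW(result, fn, x, y);
+   printf("foo's wrapper: result %d\n", result);
+   return result;
+}
+#endif  /* 0 -- illustrative sketch only */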
+
+/* Derivatives of the main macros below, for calling functions
+   returning void. */
+
+#define CALL_FN_v_v(fnptr)                                        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1)                                  \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2)                            \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3)                      \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
+
+#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4)                \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
+
+#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5)             \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
+
+#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6)        \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
+
+#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7)   \
+   do { volatile unsigned long _junk;                             \
+        CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
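+
+/* A hedged, non-compiled sketch (not part of this header) of the
+   void-returning derivatives above: wrapping a hypothetical
+   'void bar(int)' in the main executable via CALL_FN_v_W. */
+#if 0
+void I_WRAP_SONAME_FNNAME_ZU(NONE, bar)(int x)
+{
+   OrigFn fn;
+   VALGRIND_GET_ORIG_FN(fn);
+   CALL_FN_v_W(fn, x);
+}
+#endif  /* 0 -- illustrative sketch only */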
+
+/* ------------------------- x86-{linux,darwin} ---------------- */
+
+#if defined(PLAT_x86_linux)  ||  defined(PLAT_x86_darwin)
+
+/* These regs are trashed by the hidden call.  No need to mention eax,
+   as gcc can already see that, and mentioning it causes gcc to
+   bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
+   long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $16, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $32, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         "subl $12, %%esp\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         "subl $8, %%esp\n\t"                                     \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11)                          \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         "subl $4, %%esp\n\t"                                     \
+         "pushl 44(%%eax)\n\t"                                    \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11,arg12)                    \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         "pushl 48(%%eax)\n\t"                                    \
+         "pushl 44(%%eax)\n\t"                                    \
+         "pushl 40(%%eax)\n\t"                                    \
+         "pushl 36(%%eax)\n\t"                                    \
+         "pushl 32(%%eax)\n\t"                                    \
+         "pushl 28(%%eax)\n\t"                                    \
+         "pushl 24(%%eax)\n\t"                                    \
+         "pushl 20(%%eax)\n\t"                                    \
+         "pushl 16(%%eax)\n\t"                                    \
+         "pushl 12(%%eax)\n\t"                                    \
+         "pushl 8(%%eax)\n\t"                                     \
+         "pushl 4(%%eax)\n\t"                                     \
+         "movl (%%eax), %%eax\n\t"  /* target->%eax */            \
+         VALGRIND_CALL_NOREDIR_EAX                                \
+         "addl $48, %%esp\n"                                      \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_x86_linux || PLAT_x86_darwin */
+
+/* ------------------------ amd64-{linux,darwin} --------------- */
+
+#if defined(PLAT_amd64_linux)  ||  defined(PLAT_amd64_darwin)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi",       \
+                            "rdi", "r8", "r9", "r10", "r11"
+
+/* This is all pretty complex.  It's so as to make stack unwinding
+   work reliably.  See bug 243270.  The basic problem is the sub and
+   add of 128 of %rsp in all of the following macros.  If gcc believes
+   the CFA is in %rsp, then unwinding may fail, because what's at the
+   CFA is not what gcc "expected" when it constructs the CFIs for the
+   places where the macros are instantiated.
+
+   But we can't just add a CFI annotation to increase the CFA offset
+   by 128, to match the sub of 128 from %rsp, because we don't know
+   whether gcc has chosen %rsp as the CFA at that point, or whether it
+   has chosen some other register (eg, %rbp).  In the latter case,
+   adding a CFI annotation to change the CFA offset is simply wrong.
+
+   So the solution is to get hold of the CFA using
+   __builtin_dwarf_cfa(), put it in a known register, and add a
+   CFI annotation to say what the register is.  We choose %rbp for
+   this (perhaps perversely), because:
+
+   (1) %rbp is already subject to unwinding.  If a new register was
+       chosen then the unwinder would have to unwind it in all stack
+       traces, which is expensive, and
+
+   (2) %rbp is already subject to precise exception updates in the
+       JIT.  If a new register was chosen, we'd have to have precise
+       exceptions for it too, which reduces performance of the
+       generated code.
+
+   However .. one extra complication.  We can't just whack the result
+   of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
+   list of trashed registers at the end of the inline assembly
+   fragments; gcc won't allow %rbp to appear in that list.  Hence
+   instead we need to stash %rbp in %r15 for the duration of the asm,
+   and say that %r15 is trashed instead.  gcc seems happy to go with
+   that.
+
+   Oh .. and this all needs to be conditionalised so that it is
+   unchanged from before this commit, when compiled with older gccs
+   that don't support __builtin_dwarf_cfa.  Furthermore, since
+   this header file is freestanding, it has to be independent of
+   config.h, and so the following conditionalisation cannot depend on
+   configure time checks.
+
+   Although it's not clear from
+   'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
+   this expression excludes Darwin.
+   .cfi directives in Darwin assembly appear to be completely
+   different and I haven't investigated how they work.
+
+   For even more entertainment value, note we have to use the
+   completely undocumented __builtin_dwarf_cfa(), which appears to
+   really compute the CFA, whereas __builtin_frame_address(0) claims
+   to but actually doesn't.  See
+   https://bugs.kde.org/show_bug.cgi?id=243270#c47
+*/
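+
+/* For illustration: a minimal standalone probe, assuming a gcc/clang
+   that provides both builtins, of the point made above that the CFA
+   and __builtin_frame_address(0) are not the same thing:
+
+      #include <stdio.h>
+      void probe ( void )
+      {
+         printf("cfa=%p  frame_address(0)=%p\n",
+                __builtin_dwarf_cfa(), __builtin_frame_address(0));
+      }
+
+   On amd64 the CFA is normally the caller's %rsp value at the call
+   site, whereas __builtin_frame_address(0) yields probe's own frame
+   base, so the two values generally differ. */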
+#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
+#  define __FRAME_POINTER                                         \
+      ,"r"(__builtin_dwarf_cfa())
+#  define VALGRIND_CFI_PROLOGUE                                   \
+      "movq %%rbp, %%r15\n\t"                                     \
+      "movq %2, %%rbp\n\t"                                        \
+      ".cfi_remember_state\n\t"                                   \
+      ".cfi_def_cfa rbp, 0\n\t"
+#  define VALGRIND_CFI_EPILOGUE                                   \
+      "movq %%r15, %%rbp\n\t"                                     \
+      ".cfi_restore_state\n\t"
+#else
+#  define __FRAME_POINTER
+#  define VALGRIND_CFI_PROLOGUE
+#  define VALGRIND_CFI_EPILOGUE
+#endif
+
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+   long) == 8. */
+
+/* NB 9 Sept 07.  There is a nasty kludge here in all these CALL_FN_
+   macros.  In order not to trash the stack redzone, we need to drop
+   %rsp by 128 before the hidden call, and restore afterwards.  The
+   nastiness is that it is only by luck that the stack still appears
+   to be unwindable during the hidden call - since then the behaviour
+   of any routine using this macro does not match what the CFI data
+   says.  Sigh.
+
+   Why is this important?  Imagine that a wrapper has a stack
+   allocated local, and passes to the hidden call, a pointer to it.
+   Because gcc does not know about the hidden call, it may allocate
+   that local in the redzone.  Unfortunately the hidden call may then
+   trash it before it comes to use it.  So we must step clear of the
+   redzone, for the duration of the hidden call, to make it safe.
+
+   Probably the same problem afflicts the other redzone-style ABIs too
+   (ppc64-linux, ppc32-aix5, ppc64-aix5); but for those, the stack is
+   self describing (none of this CFI nonsense) so at least messing
+   with the stack pointer doesn't give a danger of non-unwindable
+   stack. */
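+
+/* A minimal usage sketch of how a client typically consumes these
+   CALL_FN_ macros, assuming the wrapper naming helpers
+   (I_WRAP_SONAME_FN_ZU, VALGRIND_GET_ORIG_FN and OrigFn) defined
+   elsewhere in this file.  The wrapper intercepts a two-argument
+   function foo and forwards to the real implementation:
+
+      #include <stdio.h>
+      #include "valgrind.h"
+
+      int I_WRAP_SONAME_FN_ZU(NONE, foo) ( int x, int y )
+      {
+         int    result;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);          // fetch the real foo
+         CALL_FN_W_WW(result, fn, x, y);    // hidden call, 2 word args
+         printf("foo(%d,%d) = %d\n", x, y, result);
+         return result;
+      }
+
+   The variant is chosen by argument count (CALL_FN_W_v up to
+   CALL_FN_W_12W); lval receives the callee's word-sized result. */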
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $8, %%rsp\n"                                       \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $16, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $24, %%rsp\n"                                      \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $32, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $136,%%rsp\n\t"                                    \
+         "pushq 88(%%rax)\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $40, %%rsp\n"                                      \
+         "addq $136,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         VALGRIND_CFI_PROLOGUE                                    \
+         "subq $128,%%rsp\n\t"                                    \
+         "pushq 96(%%rax)\n\t"                                    \
+         "pushq 88(%%rax)\n\t"                                    \
+         "pushq 80(%%rax)\n\t"                                    \
+         "pushq 72(%%rax)\n\t"                                    \
+         "pushq 64(%%rax)\n\t"                                    \
+         "pushq 56(%%rax)\n\t"                                    \
+         "movq 48(%%rax), %%r9\n\t"                               \
+         "movq 40(%%rax), %%r8\n\t"                               \
+         "movq 32(%%rax), %%rcx\n\t"                              \
+         "movq 24(%%rax), %%rdx\n\t"                              \
+         "movq 16(%%rax), %%rsi\n\t"                              \
+         "movq 8(%%rax), %%rdi\n\t"                               \
+         "movq (%%rax), %%rax\n\t"  /* target->%rax */            \
+         VALGRIND_CALL_NOREDIR_RAX                                \
+         "addq $48, %%rsp\n"                                      \
+         "addq $128,%%rsp\n\t"                                    \
+         VALGRIND_CFI_EPILOGUE                                    \
+         : /*out*/   "=a" (_res)                                  \
+         : /*in*/    "a" (&_argvec[0]) __FRAME_POINTER            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15"   \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+   extern int f9  ( int,int,int,int,int,int,int,int,int );
+   extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+   extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+   extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+   int g9 ( void ) {
+      return f9(11,22,33,44,55,66,77,88,99);
+   }
+   int g10 ( void ) {
+      return f10(11,22,33,44,55,66,77,88,99,110);
+   }
+   int g11 ( void ) {
+      return f11(11,22,33,44,55,66,77,88,99,110,121);
+   }
+   int g12 ( void ) {
+      return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+   }
+*/
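+
+/* Compiling the functions above (eg with -S) should show arg9 and
+   beyond being stored at small positive offsets from the stack
+   pointer, in the caller's outgoing parameter area, which is why the
+   larger macros below drop r1 and then store the overflow args at
+   8(1), 12(1), 16(1), ... */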
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc32-linux, 
+   sizeof(unsigned long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-16\n\t"                                       \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,16\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-16\n\t"                                       \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,16\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      _argvec[11] = (unsigned long)arg11;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-32\n\t"                                       \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,16(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,32\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)arg1;                           \
+      _argvec[2] = (unsigned long)arg2;                           \
+      _argvec[3] = (unsigned long)arg3;                           \
+      _argvec[4] = (unsigned long)arg4;                           \
+      _argvec[5] = (unsigned long)arg5;                           \
+      _argvec[6] = (unsigned long)arg6;                           \
+      _argvec[7] = (unsigned long)arg7;                           \
+      _argvec[8] = (unsigned long)arg8;                           \
+      _argvec[9] = (unsigned long)arg9;                           \
+      _argvec[10] = (unsigned long)arg10;                         \
+      _argvec[11] = (unsigned long)arg11;                         \
+      _argvec[12] = (unsigned long)arg12;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "addi 1,1,-32\n\t"                                       \
+         /* arg12 */                                              \
+         "lwz 3,48(11)\n\t"                                       \
+         "stw 3,20(1)\n\t"                                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,16(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,12(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,8(1)\n\t"                                         \
+         /* args1-8 */                                            \
+         "lwz 3,4(11)\n\t"   /* arg1->r3 */                       \
+         "lwz 4,8(11)\n\t"                                        \
+         "lwz 5,12(11)\n\t"                                       \
+         "lwz 6,16(11)\n\t"  /* arg4->r6 */                       \
+         "lwz 7,20(11)\n\t"                                       \
+         "lwz 8,24(11)\n\t"                                       \
+         "lwz 9,28(11)\n\t"                                       \
+         "lwz 10,32(11)\n\t" /* arg8->r10 */                      \
+         "lwz 11,0(11)\n\t"  /* target->r11 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "addi 1,1,32\n\t"                                        \
+         "mr %0,3"                                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc32_linux */
+
+/* ------------------------ ppc64-linux ------------------------ */
+
+#if defined(PLAT_ppc64_linux)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
+   long) == 8. */
+
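+/* A minimal usage sketch (editorial illustration, not from the upstream
+   header), following the function-wrapping example in the Valgrind
+   manual: the OrigFn argument of these CALL_FN_ macros is obtained with
+   VALGRIND_GET_ORIG_FN, defined elsewhere in this header, and on
+   ppc64-linux it carries both the target address (nraddr) and the
+   callee's TOC pointer (r2); the asm below installs that TOC for the
+   call and restores the caller's r2 afterwards.  "foo" is a
+   hypothetical wrapped function.
+
+      int I_WRAP_SONAME_FNNAME_ZU(NONE,foo)(int x, int y)
+      {
+         int    result;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);        // fills fn.nraddr (and fn.r2 here)
+         CALL_FN_W_WW(result, fn, x, y);  // hidden, non-redirected call
+         return result;
+      }
+*/
+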
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)" /* restore tocptr */                      \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-128\n\t"  /* expand stack frame */            \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,128"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-128\n\t"  /* expand stack frame */            \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,128"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-144\n\t"  /* expand stack frame */            \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,144"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         "std 2,-16(11)\n\t"  /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "addi 1,1,-144\n\t"  /* expand stack frame */            \
+         /* arg12 */                                              \
+         "ld  3,96(11)\n\t"                                       \
+         "std 3,136(1)\n\t"                                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         "addi 1,1,144"     /* restore frame */                   \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc64_linux */
+
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
+
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
+   long) == 4. */
+
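+/* Illustrative note (editorial sketch, not from the upstream header):
+   under the AAPCS the first four integer arguments travel in r0-r3, so
+   the CALL_FN_W_5W and larger variants below push the remaining
+   arguments onto the stack before the hidden call and pop them
+   afterwards.  A hypothetical wrapper for a six-argument function
+   "bar" would be:
+
+      long I_WRAP_SONAME_FNNAME_ZU(NONE,bar)(long a, long b, long c,
+                                             long d, long e, long f)
+      {
+         long   result;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);
+         CALL_FN_W_6W(result, fn, a, b, c, d, e, f);  // e, f go via the stack
+         return result;
+      }
+*/
+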
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[1];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[2];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory",  __CALLER_SAVED_REGS         \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[4];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0\n"                                           \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[5];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[6];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "push {r0} \n\t"                                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #4 \n\t"                                    \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[7];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "push {r0, r1} \n\t"                                     \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #8 \n\t"                                    \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[8];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "push {r0, r1, r2} \n\t"                                 \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #12 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[9];                          \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "push {r0, r1, r2, r3} \n\t"                             \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #16 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[10];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #20 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[11];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "push {r0} \n\t"                                         \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #24 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11)                          \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[12];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "ldr r1, [%1, #44] \n\t"                                 \
+         "push {r0, r1} \n\t"                                     \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #28 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,       \
+                                  arg6,arg7,arg8,arg9,arg10,      \
+                                  arg11,arg12)                    \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[13];                         \
+      volatile unsigned long _res;                                \
+      _argvec[0] = (unsigned long)_orig.nraddr;                   \
+      _argvec[1] = (unsigned long)(arg1);                         \
+      _argvec[2] = (unsigned long)(arg2);                         \
+      _argvec[3] = (unsigned long)(arg3);                         \
+      _argvec[4] = (unsigned long)(arg4);                         \
+      _argvec[5] = (unsigned long)(arg5);                         \
+      _argvec[6] = (unsigned long)(arg6);                         \
+      _argvec[7] = (unsigned long)(arg7);                         \
+      _argvec[8] = (unsigned long)(arg8);                         \
+      _argvec[9] = (unsigned long)(arg9);                         \
+      _argvec[10] = (unsigned long)(arg10);                       \
+      _argvec[11] = (unsigned long)(arg11);                       \
+      _argvec[12] = (unsigned long)(arg12);                       \
+      __asm__ volatile(                                           \
+         "ldr r0, [%1, #40] \n\t"                                 \
+         "ldr r1, [%1, #44] \n\t"                                 \
+         "ldr r2, [%1, #48] \n\t"                                 \
+         "push {r0, r1, r2} \n\t"                                 \
+         "ldr r0, [%1, #20] \n\t"                                 \
+         "ldr r1, [%1, #24] \n\t"                                 \
+         "ldr r2, [%1, #28] \n\t"                                 \
+         "ldr r3, [%1, #32] \n\t"                                 \
+         "ldr r4, [%1, #36] \n\t"                                 \
+         "push {r0, r1, r2, r3, r4} \n\t"                         \
+         "ldr r0, [%1, #4] \n\t"                                  \
+         "ldr r1, [%1, #8] \n\t"                                  \
+         "ldr r2, [%1, #12] \n\t"                                 \
+         "ldr r3, [%1, #16] \n\t"                                 \
+         "ldr r4, [%1] \n\t"  /* target->r4 */                    \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4                   \
+         "add sp, sp, #32 \n\t"                                   \
+         "mov %0, r0"                                             \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "0" (&_argvec[0])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_arm_linux */
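+
+/* Illustration only (not part of the definitions above): on all of
+   these platforms the CALL_FN_W_* macros are meant to be used from a
+   function wrapper, together with the OrigFn / VALGRIND_GET_ORIG_FN /
+   I_WRAP_SONAME_FNNAME_* machinery declared earlier in this header.
+   A minimal sketch for a two-argument int function named foo living
+   in an object with an empty soname looks roughly like this:
+
+      int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
+      {
+         int    r;
+         OrigFn fn;
+         VALGRIND_GET_ORIG_FN(fn);    // address of the wrapped foo
+         CALL_FN_W_WW(r, fn, x, y);   // call it without re-wrapping
+         return r;
+      }
+
+   On arm-linux the macros load the word-sized arguments into r0-r3
+   (spilling any remaining ones onto the stack, as above) and branch
+   to the target via VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4. */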
+
+/* ------------------------ ppc32-aix5 ------------------------- */
+
+#if defined(PLAT_ppc32_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+   still works.  Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
+         "addi 1,1,-" #_n_fr "\n\t"                               \
+         "lwz  3," #_n_fr "(1)\n\t"                               \
+         "stw  3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
+         "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc32-aix5, sizeof(unsigned
+   long) == 4. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t" /* arg2->r4 */                       \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(64)                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(64)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(64)                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(64)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(72)                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,64(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(72)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "stw  2,-8(11)\n\t"  /* save tocptr */                   \
+         "lwz  2,-4(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(72)                        \
+         /* arg12 */                                              \
+         "lwz 3,48(11)\n\t"                                       \
+         "stw 3,68(1)\n\t"                                        \
+         /* arg11 */                                              \
+         "lwz 3,44(11)\n\t"                                       \
+         "stw 3,64(1)\n\t"                                        \
+         /* arg10 */                                              \
+         "lwz 3,40(11)\n\t"                                       \
+         "stw 3,60(1)\n\t"                                        \
+         /* arg9 */                                               \
+         "lwz 3,36(11)\n\t"                                       \
+         "stw 3,56(1)\n\t"                                        \
+         /* args1-8 */                                            \
+         "lwz  3, 4(11)\n\t"  /* arg1->r3 */                      \
+         "lwz  4, 8(11)\n\t"  /* arg2->r4 */                      \
+         "lwz  5, 12(11)\n\t" /* arg3->r5 */                      \
+         "lwz  6, 16(11)\n\t" /* arg4->r6 */                      \
+         "lwz  7, 20(11)\n\t" /* arg5->r7 */                      \
+         "lwz  8, 24(11)\n\t" /* arg6->r8 */                      \
+         "lwz  9, 28(11)\n\t" /* arg7->r9 */                      \
+         "lwz 10, 32(11)\n\t" /* arg8->r10 */                     \
+         "lwz 11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "lwz 2,-8(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(72)                                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc32_aix5 */
+
+/* ------------------------ ppc64-aix5 ------------------------- */
+
+#if defined(PLAT_ppc64_aix5)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS                                       \
+   "lr", "ctr", "xer",                                            \
+   "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",        \
+   "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",   \
+   "r11", "r12", "r13"
+
+/* Expand the stack frame, copying enough info that unwinding
+   still works.  Trashes r3. */
+
+#define VG_EXPAND_FRAME_BY_trashes_r3(_n_fr)                      \
+         "addi 1,1,-" #_n_fr "\n\t"                               \
+         "ld   3," #_n_fr "(1)\n\t"                               \
+         "std  3,0(1)\n\t"
+
+#define VG_CONTRACT_FRAME_BY(_n_fr)                               \
+         "addi 1,1," #_n_fr "\n\t"
+
+/* These CALL_FN_ macros assume that on ppc64-aix5, sizeof(unsigned
+   long) == 8. */
+
+#define CALL_FN_W_v(lval, orig)                                   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+0];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1] = (unsigned long)_orig.r2;                       \
+      _argvec[2] = (unsigned long)_orig.nraddr;                   \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1)                             \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+1];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld 2,-16(11)\n\t" /* restore tocptr */                  \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+2];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3)                 \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+3];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+4];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5)        \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+5];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6)   \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+6];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7)                            \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+7];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8)                       \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+8];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,   \
+                                 arg7,arg8,arg9)                  \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+9];                        \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(128)                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(128)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10)           \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+10];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(128)                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(128)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                  arg7,arg8,arg9,arg10,arg11)     \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+11];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(144)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6,  \
+                                arg7,arg8,arg9,arg10,arg11,arg12) \
+   do {                                                           \
+      volatile OrigFn        _orig = (orig);                      \
+      volatile unsigned long _argvec[3+12];                       \
+      volatile unsigned long _res;                                \
+      /* _argvec[0] holds current r2 across the call */           \
+      _argvec[1]   = (unsigned long)_orig.r2;                     \
+      _argvec[2]   = (unsigned long)_orig.nraddr;                 \
+      _argvec[2+1] = (unsigned long)arg1;                         \
+      _argvec[2+2] = (unsigned long)arg2;                         \
+      _argvec[2+3] = (unsigned long)arg3;                         \
+      _argvec[2+4] = (unsigned long)arg4;                         \
+      _argvec[2+5] = (unsigned long)arg5;                         \
+      _argvec[2+6] = (unsigned long)arg6;                         \
+      _argvec[2+7] = (unsigned long)arg7;                         \
+      _argvec[2+8] = (unsigned long)arg8;                         \
+      _argvec[2+9] = (unsigned long)arg9;                         \
+      _argvec[2+10] = (unsigned long)arg10;                       \
+      _argvec[2+11] = (unsigned long)arg11;                       \
+      _argvec[2+12] = (unsigned long)arg12;                       \
+      __asm__ volatile(                                           \
+         "mr 11,%1\n\t"                                           \
+         VG_EXPAND_FRAME_BY_trashes_r3(512)                       \
+         "std  2,-16(11)\n\t" /* save tocptr */                   \
+         "ld   2,-8(11)\n\t"  /* use nraddr's tocptr */           \
+         VG_EXPAND_FRAME_BY_trashes_r3(144)                       \
+         /* arg12 */                                              \
+         "ld  3,96(11)\n\t"                                       \
+         "std 3,136(1)\n\t"                                       \
+         /* arg11 */                                              \
+         "ld  3,88(11)\n\t"                                       \
+         "std 3,128(1)\n\t"                                       \
+         /* arg10 */                                              \
+         "ld  3,80(11)\n\t"                                       \
+         "std 3,120(1)\n\t"                                       \
+         /* arg9 */                                               \
+         "ld  3,72(11)\n\t"                                       \
+         "std 3,112(1)\n\t"                                       \
+         /* args1-8 */                                            \
+         "ld   3, 8(11)\n\t"  /* arg1->r3 */                      \
+         "ld   4, 16(11)\n\t" /* arg2->r4 */                      \
+         "ld   5, 24(11)\n\t" /* arg3->r5 */                      \
+         "ld   6, 32(11)\n\t" /* arg4->r6 */                      \
+         "ld   7, 40(11)\n\t" /* arg5->r7 */                      \
+         "ld   8, 48(11)\n\t" /* arg6->r8 */                      \
+         "ld   9, 56(11)\n\t" /* arg7->r9 */                      \
+         "ld  10, 64(11)\n\t" /* arg8->r10 */                     \
+         "ld  11, 0(11)\n\t"  /* target->r11 */                   \
+         VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11                  \
+         "mr 11,%1\n\t"                                           \
+         "mr %0,3\n\t"                                            \
+         "ld  2,-16(11)\n\t" /* restore tocptr */                 \
+         VG_CONTRACT_FRAME_BY(144)                                \
+         VG_CONTRACT_FRAME_BY(512)                                \
+         : /*out*/   "=r" (_res)                                  \
+         : /*in*/    "r" (&_argvec[2])                            \
+         : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS          \
+      );                                                          \
+      lval = (__typeof__(lval)) _res;                             \
+   } while (0)
+
+#endif /* PLAT_ppc64_aix5 */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS.               */
+/*                                                                    */
+/* ------------------------------------------------------------------ */
+
+/* Some request codes.  There are many more of these, but most are not
+   exposed to end-user view.  These are the public ones, all of the
+   form 0x1000 + small_number.
+
+   Core ones are in the range 0x00000000--0x0000ffff.  The non-public
+   ones start at 0x2000.
+*/
+
+/* These macros are used by tools -- they must be public, but don't
+   embed them into other programs. */
+#define VG_USERREQ_TOOL_BASE(a,b) \
+   ((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
+#define VG_IS_TOOL_USERREQ(a, b, v) \
+   (VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
+
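+/* Usage sketch: a tool's requests are conventionally numbered upwards
+   from a two-character base.  The 'X','T' tool code below is purely
+   hypothetical; real tools pick their own pair (Memcheck, for example,
+   builds its requests from VG_USERREQ_TOOL_BASE('M','C')). */
+#if 0
+enum {
+   EXAMPLE_TOOL_USERREQ__FIRST = VG_USERREQ_TOOL_BASE('X','T'),
+   EXAMPLE_TOOL_USERREQ__SECOND
+};
+/* VG_IS_TOOL_USERREQ('X','T', r) is then true for both values above. */
+#endif
+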
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! 
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+   enum { VG_USERREQ__RUNNING_ON_VALGRIND  = 0x1001,
+          VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+          /* These allow any function to be called from the simulated
+             CPU but run on the real CPU.  Nb: the first arg passed to
+             the function is always the ThreadId of the running
+             thread!  So CLIENT_CALL0 actually requires a 1 arg
+             function, etc. */
+          VG_USERREQ__CLIENT_CALL0 = 0x1101,
+          VG_USERREQ__CLIENT_CALL1 = 0x1102,
+          VG_USERREQ__CLIENT_CALL2 = 0x1103,
+          VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+          /* Can be useful in regression testing suites -- eg. can
+             send Valgrind's output to /dev/null and still count
+             errors. */
+          VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+          /* These are useful and can be interpreted by any tool that
+             tracks malloc() et al, by using vg_replace_malloc.c. */
+          VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+          VG_USERREQ__FREELIKE_BLOCK   = 0x1302,
+          /* Memory pool support. */
+          VG_USERREQ__CREATE_MEMPOOL   = 0x1303,
+          VG_USERREQ__DESTROY_MEMPOOL  = 0x1304,
+          VG_USERREQ__MEMPOOL_ALLOC    = 0x1305,
+          VG_USERREQ__MEMPOOL_FREE     = 0x1306,
+          VG_USERREQ__MEMPOOL_TRIM     = 0x1307,
+          VG_USERREQ__MOVE_MEMPOOL     = 0x1308,
+          VG_USERREQ__MEMPOOL_CHANGE   = 0x1309,
+          VG_USERREQ__MEMPOOL_EXISTS   = 0x130a,
+
+          /* Allow printfs to valgrind log. */
+          /* The first two pass the va_list argument by value, which
+             assumes it is the same size as or smaller than a UWord,
+             which generally isn't the case.  Hence are deprecated.
+             The second two pass the vargs by reference and so are
+             immune to this problem. */
+          /* both :: char* fmt, va_list vargs (DEPRECATED) */
+          VG_USERREQ__PRINTF           = 0x1401,
+          VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+          /* both :: char* fmt, va_list* vargs */
+          VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
+          VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
+
+          /* Stack support. */
+          VG_USERREQ__STACK_REGISTER   = 0x1501,
+          VG_USERREQ__STACK_DEREGISTER = 0x1502,
+          VG_USERREQ__STACK_CHANGE     = 0x1503,
+
+          /* Wine support */
+          VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
+
+          /* Querying of debug info. */
+          VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701
+   } Vg_ClientRequest;
+
+#if !defined(__GNUC__)
+#  define __extension__ /* */
+#endif
+
+
+/*
+ * VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
+ * client request and whose value equals the client request result.
+ */
+
+#if defined(NVALGRIND)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                               \
+        _zzq_default, _zzq_request,                                    \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)         \
+   (_zzq_default)
+
+#else /*defined(NVALGRIND)*/
+
+#if defined(_MSC_VER)
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                                \
+        _zzq_default, _zzq_request,                                     \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)          \
+   (vg_VALGRIND_DO_CLIENT_REQUEST_EXPR((uintptr_t)(_zzq_default),       \
+        (_zzq_request), (uintptr_t)(_zzq_arg1), (uintptr_t)(_zzq_arg2), \
+        (uintptr_t)(_zzq_arg3), (uintptr_t)(_zzq_arg4),                 \
+        (uintptr_t)(_zzq_arg5)))
+
+static __inline unsigned
+vg_VALGRIND_DO_CLIENT_REQUEST_EXPR(uintptr_t _zzq_default,
+                                   unsigned _zzq_request, uintptr_t _zzq_arg1,
+                                   uintptr_t _zzq_arg2, uintptr_t _zzq_arg3,
+                                   uintptr_t _zzq_arg4, uintptr_t _zzq_arg5)
+{
+    unsigned _zzq_rlval;
+    VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request,
+                      _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5);
+    return _zzq_rlval;
+}
+
+#else /*defined(_MSC_VER)*/
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR(                               \
+        _zzq_default, _zzq_request,                                    \
+        _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)         \
+   (__extension__({unsigned int _zzq_rlval;                            \
+    VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, _zzq_request, \
+                _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+    _zzq_rlval;                                                        \
+   }))
+
+#endif /*defined(_MSC_VER)*/
+
+#endif /*defined(NVALGRIND)*/
+
+
+/* Returns the number of Valgrinds this code is running under.  That
+   is, 0 if running natively, 1 if running under Valgrind, 2 if
+   running under Valgrind which is running under another Valgrind,
+   etc. */
+#define RUNNING_ON_VALGRIND                                           \
+    VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */,                   \
+                                    VG_USERREQ__RUNNING_ON_VALGRIND,  \
+                                    0, 0, 0, 0, 0)                    \
+
+
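+/* Usage sketch: the nesting count makes it easy to enable extra
+   self-checks only while being simulated.  The helper name below is
+   illustrative only. */
+#if 0
+static int example_running_under_valgrind(void)
+{
+   /* 0 natively, 1 under Valgrind, 2 under Valgrind-on-Valgrind, ... */
+   return RUNNING_ON_VALGRIND > 0;
+}
+#endif
+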
+/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
+   _qzz_len - 1].  Useful if you are debugging a JITter or some such,
+   since it provides a way to make sure valgrind will retranslate the
+   invalidated area.  Returns no value. */
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len)         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__DISCARD_TRANSLATIONS,  \
+                               _qzz_addr, _qzz_len, 0, 0, 0);     \
+   }
+
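+/* Usage sketch for a JIT: after rewriting previously executed machine
+   code at [code, code+len), ask Valgrind to drop any cached translations
+   of that range before jumping back into it.  The function name is
+   illustrative only. */
+#if 0
+static void example_patch_jit_code(void* code, unsigned long len)
+{
+   /* ... overwrite the machine code in [code, code+len) here ... */
+   VALGRIND_DISCARD_TRANSLATIONS(code, len);
+}
+#endif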
+
+/* These requests are for getting Valgrind itself to print something.
+   Possibly with a backtrace.  This is a really ugly hack.  The return value
+   is the number of characters printed, excluding the "**<pid>** " part at the
+   start and the backtrace (if present). */
+
+#if defined(NVALGRIND)
+
+#  define VALGRIND_PRINTF(...)
+#  define VALGRIND_PRINTF_BACKTRACE(...)
+
+#else /* NVALGRIND */
+
+#if !defined(_MSC_VER)
+/* Modern GCC will optimize the static routine out if unused,
+   and the unused attribute suppresses warnings about it.  */
+static int VALGRIND_PRINTF(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+#if defined(_MSC_VER)
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_VALIST_BY_REF,
+                              (uintptr_t)format,
+                              (uintptr_t)&vargs,
+                              0, 0, 0);
+#else
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_VALIST_BY_REF,
+                              (unsigned long)format,
+                              (unsigned long)&vargs, 
+                              0, 0, 0);
+#endif
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+#if !defined(_MSC_VER)
+static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+   __attribute__((format(__printf__, 1, 2), __unused__));
+#endif
+static int
+#if defined(_MSC_VER)
+__inline
+#endif
+VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+{
+   unsigned long _qzz_res;
+   va_list vargs;
+   va_start(vargs, format);
+#if defined(_MSC_VER)
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+                              (uintptr_t)format,
+                              (uintptr_t)&vargs,
+                              0, 0, 0);
+#else
+   VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,
+                              VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
+                              (unsigned long)format,
+                              (unsigned long)&vargs, 
+                              0, 0, 0);
+#endif
+   va_end(vargs);
+   return (int)_qzz_res;
+}
+
+#endif /* NVALGRIND */
+
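+/* Usage sketch: messages sent this way appear in Valgrind's own log
+   (interleaved with its error reports) rather than on the program's
+   stdout/stderr.  The helper name is illustrative only. */
+#if 0
+static void example_log_phase(int phase)
+{
+   VALGRIND_PRINTF("entering phase %d\n", phase);
+   VALGRIND_PRINTF_BACKTRACE("and this is how we reached phase %d\n",
+                             phase);
+}
+#endif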
+
+/* These requests allow control to move from the simulated CPU to the
+   real CPU, calling an arbitrary function.
+   
+   Note that the current ThreadId is inserted as the first argument.
+   So this call:
+
+     VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
+
+   requires f to have this signature:
+
+     Word f(Word tid, Word arg1, Word arg2)
+
+   where "Word" is a word-sized type.
+
+   Note that these client requests are not entirely reliable.  For example,
+   if you call a function with them that subsequently calls printf(),
+   there's a high chance Valgrind will crash.  Generally, your prospects of
+   these working are made higher if the called function does not refer to
+   any global variables, and does not refer to any libc or other functions
+   (printf et al).  Any kind of entanglement with libc or dynamic linking is
+   likely to have a bad outcome, for tricky reasons which we've grappled
+   with a lot in the past.
+*/
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn)                          \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL0,          \
+                               _qyy_fn,                           \
+                               0, 0, 0, 0);                       \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1)               \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL1,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, 0, 0, 0);               \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2)    \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL2,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2, 0, 0);       \
+    _qyy_res;                                                     \
+   })
+
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+   __extension__                                                  \
+   ({unsigned long _qyy_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__CLIENT_CALL3,          \
+                               _qyy_fn,                           \
+                               _qyy_arg1, _qyy_arg2,              \
+                               _qyy_arg3, 0);                     \
+    _qyy_res;                                                     \
+   })
+
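+/* Usage sketch: because the ThreadId is prepended, a CALL2 site needs a
+   three-parameter function.  Names below are illustrative only. */
+#if 0
+static unsigned long example_on_real_cpu(unsigned long tid,
+                                         unsigned long a, unsigned long b)
+{
+   (void)tid;      /* ThreadId of the simulated caller */
+   return a + b;   /* runs on the real CPU, not the simulated one */
+}
+/* call site:  r = VALGRIND_NON_SIMD_CALL2(example_on_real_cpu, 40, 2); */
+#endif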
+
+/* Counts the number of errors that have been recorded by a tool.  Nb:
+   the tool must record the errors with VG_(maybe_record_error)() or
+   VG_(unique_error)() for them to be counted. */
+#define VALGRIND_COUNT_ERRORS                                     \
+   __extension__                                                  \
+   ({unsigned int _qyy_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */,  \
+                               VG_USERREQ__COUNT_ERRORS,          \
+                               0, 0, 0, 0, 0);                    \
+    _qyy_res;                                                     \
+   })
+
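+/* Usage sketch for a regression harness: compare the error count before
+   and after exercising some code to decide whether it produced new
+   tool-reported errors.  The helper name is illustrative only. */
+#if 0
+static int example_added_errors(void (*exercise)(void))
+{
+   unsigned int before = VALGRIND_COUNT_ERRORS;
+   exercise();
+   return VALGRIND_COUNT_ERRORS > before;
+}
+#endif
+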
+/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
+   when heap blocks are allocated in order to give accurate results.  This
+   happens automatically for the standard allocator functions such as
+   malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
+   delete[], etc.
+
+   But if your program uses a custom allocator, this doesn't automatically
+   happen, and Valgrind will not do as well.  For example, if you allocate
+   superblocks with mmap() and then allocate chunks of the superblocks, all
+   Valgrind's observations will be at the mmap() level and it won't know that
+   the chunks should be considered separate entities.  In Memcheck's case,
+   that means you probably won't get heap block overrun detection (because
+   there won't be redzones marked as unaddressable) and you definitely won't
+   get any leak detection.
+
+   The following client requests allow a custom allocator to be annotated so
+   that it can be handled accurately by Valgrind.
+
+   VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
+   by a malloc()-like function.  For Memcheck (an illustrative case), this
+   does two things:
+
+   - It records that the block has been allocated.  This means any addresses
+     within the block mentioned in error messages will be
+     identified as belonging to the block.  It also means that if the block
+     isn't freed it will be detected by the leak checker.
+
+   - It marks the block as being addressable and undefined (if 'is_zeroed' is
+     not set), or addressable and defined (if 'is_zeroed' is set).  This
+     controls how accesses to the block by the program are handled.
+   
+   'addr' is the start of the usable block (ie. after any
+   redzone), 'sizeB' is its size.  'rzB' is the redzone size if the allocator
+   can apply redzones -- these are blocks of padding at the start and end of
+   each block.  Adding redzones is recommended as it makes it much more likely
+   Valgrind will spot block overruns.  `is_zeroed' indicates if the memory is
+   zeroed (or filled with another predictable value), as is the case for
+   calloc().
+   
+   VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
+   heap block -- that will be used by the client program -- is allocated.
+   It's best to put it at the outermost level of the allocator if possible;
+   for example, if you have a function my_alloc() which calls
+   internal_alloc(), and the client request is put inside internal_alloc(),
+   stack traces relating to the heap block will contain entries for both
+   my_alloc() and internal_alloc(), which is probably not what you want.
+
+   For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
+   custom blocks from within a heap block, B, that has been allocated with
+   malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
+   -- the custom blocks will take precedence.
+
+   VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK.  For
+   Memcheck, it does two things:
+
+   - It records that the block has been deallocated.  This assumes that the
+     block was annotated as having been allocated via
+     VALGRIND_MALLOCLIKE_BLOCK.  Otherwise, an error will be issued.
+
+   - It marks the block as being unaddressable.
+
+   VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
+   heap block is deallocated.
+
+   In many cases, these two client requests will not be enough to get your
+   allocator working well with Memcheck.  More specifically, if your allocator
+   writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
+   will be necessary to mark the memory as addressable just before the zeroing
+   occurs, otherwise you'll get a lot of invalid write errors.  For example,
+   you'll need to do this if your allocator recycles freed blocks, but it
+   zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
+   Alternatively, if your allocator reuses freed blocks for allocator-internal
+   data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
+
+   Really, what's happening is a blurring of the lines between the client
+   program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
+   memory should be considered unaddressable to the client program, but the
+   allocator knows more than the rest of the client program and so may be able
+   to safely access it.  Extra client requests are necessary for Valgrind to
+   understand the distinction between the allocator and the rest of the
+   program.
+
+   Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request;  it
+   has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
+   
+   Ignored if addr == 0.
+*/
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)    \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MALLOCLIKE_BLOCK,      \
+                               addr, sizeB, rzB, is_zeroed, 0);   \
+   }
+
+/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
+   Ignored if addr == 0.
+*/
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB)                        \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__FREELIKE_BLOCK,        \
+                               addr, rzB, 0, 0, 0);               \
+   }
+
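+/* Usage sketch: a custom allocator that carves client blocks out of a
+   larger region, annotated so Memcheck can track each block.  The bump
+   pointer scheme and all names are hypothetical; the redzone constant
+   must match the rzB value passed to both requests. */
+#if 0
+#define EXAMPLE_RZB 16
+static void* example_carve_block(char** cursor, unsigned long size)
+{
+   char* usable = *cursor + EXAMPLE_RZB;        /* skip leading redzone */
+   *cursor = usable + size + EXAMPLE_RZB;       /* skip trailing redzone */
+   VALGRIND_MALLOCLIKE_BLOCK(usable, size, EXAMPLE_RZB, /*is_zeroed*/0);
+   return usable;
+}
+static void example_release_block(void* usable)
+{
+   VALGRIND_FREELIKE_BLOCK(usable, EXAMPLE_RZB);
+   /* ... put the block back on the allocator's free list here ... */
+}
+#endif
+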
+/* Create a memory pool. */
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed)             \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__CREATE_MEMPOOL,        \
+                               pool, rzB, is_zeroed, 0, 0);       \
+   }
+
+/* Destroy a memory pool. */
+#define VALGRIND_DESTROY_MEMPOOL(pool)                            \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__DESTROY_MEMPOOL,       \
+                               pool, 0, 0, 0, 0);                 \
+   }
+
+/* Associate a piece of memory with a memory pool. */
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size)                  \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_ALLOC,         \
+                               pool, addr, size, 0, 0);           \
+   }
+
+/* Disassociate a piece of memory from a memory pool. */
+#define VALGRIND_MEMPOOL_FREE(pool, addr)                         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_FREE,          \
+                               pool, addr, 0, 0, 0);              \
+   }
+
+/* Disassociate any pieces outside a particular range. */
+#define VALGRIND_MEMPOOL_TRIM(pool, addr, size)                   \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_TRIM,          \
+                               pool, addr, size, 0, 0);           \
+   }
+
+/* Tell Valgrind the pool anchored at poolA has moved to anchor poolB. */
+#define VALGRIND_MOVE_MEMPOOL(poolA, poolB)                       \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MOVE_MEMPOOL,          \
+                               poolA, poolB, 0, 0, 0);            \
+   }
+
+/* Resize and/or move a piece associated with a memory pool. */
+#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size)         \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_CHANGE,        \
+                               pool, addrA, addrB, size, 0);      \
+   }
+
+/* Return 1 if a mempool exists, else 0. */
+#define VALGRIND_MEMPOOL_EXISTS(pool)                             \
+   __extension__                                                  \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MEMPOOL_EXISTS,        \
+                               pool, 0, 0, 0, 0);                 \
+    _qzz_res;                                                     \
+   })
+
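+/* Usage sketch of the mempool lifecycle: create a pool identified by an
+   anchor address, report each carved-out piece, and destroy the pool when
+   the whole backing region goes away.  The anchor is any address that
+   uniquely identifies the pool (often the pool header itself). */
+#if 0
+static void example_mempool_roundtrip(void* anchor, void* piece,
+                                      unsigned long piece_size)
+{
+   VALGRIND_CREATE_MEMPOOL(anchor, /*rzB*/0, /*is_zeroed*/0);
+   VALGRIND_MEMPOOL_ALLOC(anchor, piece, piece_size);
+   /* ... the program uses the piece here ... */
+   VALGRIND_MEMPOOL_FREE(anchor, piece);
+   VALGRIND_DESTROY_MEMPOOL(anchor);
+}
+#endif
+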
+/* Mark a piece of memory as being a stack. Returns a stack id. */
+#define VALGRIND_STACK_REGISTER(start, end)                       \
+   __extension__                                                  \
+   ({unsigned int _qzz_res;                                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_REGISTER,        \
+                               start, end, 0, 0, 0);              \
+    _qzz_res;                                                     \
+   })
+
+/* Unmark the piece of memory associated with a stack id as being a
+   stack. */
+#define VALGRIND_STACK_DEREGISTER(id)                             \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_DEREGISTER,      \
+                               id, 0, 0, 0, 0);                   \
+   }
+
+/* Change the start and end address of the stack id. */
+#define VALGRIND_STACK_CHANGE(id, start, end)                     \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__STACK_CHANGE,          \
+                               id, start, end, 0, 0);             \
+   }
+
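+/* Usage sketch for code that switches to its own stacks (coroutines,
+   fibers): register the stack's extent before switching onto it, and
+   deregister it before freeing the memory.  Names are illustrative. */
+#if 0
+static unsigned int example_register_fiber_stack(char* base,
+                                                 unsigned long size)
+{
+   /* base = lowest address of the stack, base+size = one past the top. */
+   return VALGRIND_STACK_REGISTER(base, base + size);
+}
+/* later:  VALGRIND_STACK_DEREGISTER(id); */
+#endif
+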
+/* Load PDB debug info for Wine PE image_map. */
+#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta)   \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__LOAD_PDB_DEBUGINFO,    \
+                               fd, ptr, total_size, delta, 0);    \
+   }
+
+/* Map a code address to a source file name and line number.  buf64
+   must point to a 64-byte buffer in the caller's address space.  The
+   result will be dumped in there and is guaranteed to be zero
+   terminated.  If no info is found, the first byte is set to zero. */
+#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64)                    \
+   {unsigned int _qzz_res;                                        \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                       \
+                               VG_USERREQ__MAP_IP_TO_SRCLOC,      \
+                               addr, buf64, 0, 0, 0);             \
+   }
+
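+/* Usage sketch: the caller supplies the 64-byte buffer itself; a zero
+   first byte means no source information was found for the address.
+   The helper name is illustrative only. */
+#if 0
+static int example_have_srcloc(void* ip)
+{
+   char buf64[64];
+   VALGRIND_MAP_IP_TO_SRCLOC(ip, buf64);
+   return buf64[0] != 0;
+}
+#endif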
+
+#undef PLAT_x86_linux
+#undef PLAT_amd64_linux
+#undef PLAT_ppc32_linux
+#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
+#undef PLAT_ppc32_aix5
+#undef PLAT_ppc64_aix5
+
+#endif   /* __VALGRIND_H */
+
+#endif
diff --git a/base/threading/non_thread_safe_unittest.cc b/base/threading/non_thread_safe_unittest.cc
index 2a27c3f..d523fc5 100644
--- a/base/threading/non_thread_safe_unittest.cc
+++ b/base/threading/non_thread_safe_unittest.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/threading/non_thread_safe.h"
+
+#include <memory>
+
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/non_thread_safe.h"
 #include "base/threading/simple_thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -72,7 +74,7 @@
   void Run() override { non_thread_safe_class_.reset(); }
 
  private:
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class_;
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class_;
 
   DISALLOW_COPY_AND_ASSIGN(DeleteNonThreadSafeClassOnThread);
 };
@@ -80,7 +82,7 @@
 }  // namespace
 
 TEST(NonThreadSafeTest, CallsAllowedOnSameThread) {
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class(
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
 
   // Verify that DoStuff doesn't assert.
@@ -91,7 +93,7 @@
 }
 
 TEST(NonThreadSafeTest, DetachThenDestructOnDifferentThread) {
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class(
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
 
   // Verify that the destructor doesn't assert when called on a different thread
@@ -107,7 +109,7 @@
 #if GTEST_HAS_DEATH_TEST || !ENABLE_NON_THREAD_SAFE
 
 void NonThreadSafeClass::MethodOnDifferentThreadImpl() {
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class(
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
 
   // Verify that DoStuff asserts in debug builds only when called
@@ -131,7 +133,7 @@
 #endif  // ENABLE_NON_THREAD_SAFE
 
 void NonThreadSafeClass::DestructorOnDifferentThreadImpl() {
-  scoped_ptr<NonThreadSafeClass> non_thread_safe_class(
+  std::unique_ptr<NonThreadSafeClass> non_thread_safe_class(
       new NonThreadSafeClass);
 
   // Verify that the destructor asserts in debug builds only
diff --git a/base/threading/platform_thread_internal_posix.cc b/base/threading/platform_thread_internal_posix.cc
index 9af0204..378a24d 100644
--- a/base/threading/platform_thread_internal_posix.cc
+++ b/base/threading/platform_thread_internal_posix.cc
@@ -4,6 +4,7 @@
 
 #include "base/threading/platform_thread_internal_posix.h"
 
+#include "base/containers/adapters.h"
 #include "base/logging.h"
 
 namespace base {
@@ -11,8 +12,7 @@
 namespace internal {
 
 int ThreadPriorityToNiceValue(ThreadPriority priority) {
-  for (const ThreadPriorityToNiceValuePair& pair :
-       kThreadPriorityToNiceValueMap) {
+  for (const auto& pair : kThreadPriorityToNiceValueMap) {
     if (pair.priority == priority)
       return pair.nice_value;
   }
@@ -21,13 +21,17 @@
 }
 
 ThreadPriority NiceValueToThreadPriority(int nice_value) {
-  for (const ThreadPriorityToNiceValuePair& pair :
-       kThreadPriorityToNiceValueMap) {
-    if (pair.nice_value == nice_value)
+  // Try to find a priority that best describes |nice_value|. If there isn't
+  // an exact match, this method returns the closest priority whose nice value
+  // is higher (lower priority) than |nice_value|.
+  for (const auto& pair : Reversed(kThreadPriorityToNiceValueMap)) {
+    if (pair.nice_value >= nice_value)
       return pair.priority;
   }
-  NOTREACHED() << "Unknown nice value";
-  return ThreadPriority::NORMAL;
+
+  // Reaching here means |nice_value| is greater than any nice value in the
+  // map, i.e. it denotes a lower priority than any defined entry, so the
+  // lowest priority is the best fit.
+  return ThreadPriority::BACKGROUND;
 }
 
 }  // namespace internal
diff --git a/base/threading/platform_thread_internal_posix.h b/base/threading/platform_thread_internal_posix.h
index 05a8d1e..5f4a215 100644
--- a/base/threading/platform_thread_internal_posix.h
+++ b/base/threading/platform_thread_internal_posix.h
@@ -5,6 +5,7 @@
 #ifndef BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
 #define BASE_THREADING_PLATFORM_THREAD_INTERNAL_POSIX_H_
 
+#include "base/base_export.h"
 #include "base/threading/platform_thread.h"
 
 namespace base {
@@ -15,7 +16,11 @@
   ThreadPriority priority;
   int nice_value;
 };
-extern const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4];
+// The elements must be listed in the order of increasing priority (lowest
+// priority first), that is, in the order of decreasing nice values (highest
+// nice value first).
+BASE_EXPORT extern
+const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4];
 
 // Returns the nice value matching |priority| based on the platform-specific
 // implementation of kThreadPriorityToNiceValueMap.
@@ -23,7 +28,7 @@
 
 // Returns the ThreadPriority matching |nice_value| based on the platform-
 // specific implementation of kThreadPriorityToNiceValueMap.
-ThreadPriority NiceValueToThreadPriority(int nice_value);
+BASE_EXPORT ThreadPriority NiceValueToThreadPriority(int nice_value);
 
 // Allows platform specific tweaks to the generic POSIX solution for
 // SetCurrentThreadPriority. Returns true if the platform-specific
diff --git a/base/threading/platform_thread_linux.cc b/base/threading/platform_thread_linux.cc
index 7e2365c..ab7c97e 100644
--- a/base/threading/platform_thread_linux.cc
+++ b/base/threading/platform_thread_linux.cc
@@ -94,7 +94,7 @@
 
 void TerminateOnThread() {}
 
-size_t GetDefaultThreadStackSize(const pthread_attr_t& /* attributes */) {
+size_t GetDefaultThreadStackSize(const pthread_attr_t& /*attributes*/) {
 #if !defined(THREAD_SANITIZER)
   return 0;
 #else
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
index bd6ae2d..d8bcf92 100644
--- a/base/threading/platform_thread_posix.cc
+++ b/base/threading/platform_thread_posix.cc
@@ -12,9 +12,10 @@
 #include <sys/resource.h>
 #include <sys/time.h>
 
+#include <memory>
+
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/threading/platform_thread_internal_posix.h"
 #include "base/threading/thread_id_name_manager.h"
 #include "base/threading/thread_restrictions.h"
@@ -47,7 +48,8 @@
   PlatformThread::Delegate* delegate = nullptr;
 
   {
-    scoped_ptr<ThreadParams> thread_params(static_cast<ThreadParams*>(params));
+    std::unique_ptr<ThreadParams> thread_params(
+        static_cast<ThreadParams*>(params));
 
     delegate = thread_params->delegate;
     if (!thread_params->joinable)
@@ -98,7 +100,7 @@
   if (stack_size > 0)
     pthread_attr_setstacksize(&attributes, stack_size);
 
-  scoped_ptr<ThreadParams> params(new ThreadParams);
+  std::unique_ptr<ThreadParams> params(new ThreadParams);
   params->delegate = delegate;
   params->joinable = joinable;
   params->priority = priority;
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
index 6738775..82221e1 100644
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -14,6 +14,7 @@
 #if defined(OS_POSIX)
 #include <sys/types.h>
 #include <unistd.h>
+#include "base/threading/platform_thread_internal_posix.h"
 #elif defined(OS_WIN)
 #include <windows.h>
 #endif
@@ -271,4 +272,64 @@
   }
 }
 
+// Test for a function defined in platform_thread_internal_posix.cc. On OSX and
+// iOS, platform_thread_internal_posix.cc is not compiled, so these platforms
+// are excluded here, too.
+#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_IOS)
+TEST(PlatformThreadTest, GetNiceValueToThreadPriority) {
+  using internal::NiceValueToThreadPriority;
+  using internal::kThreadPriorityToNiceValueMap;
+
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            kThreadPriorityToNiceValueMap[0].priority);
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            kThreadPriorityToNiceValueMap[1].priority);
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            kThreadPriorityToNiceValueMap[2].priority);
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            kThreadPriorityToNiceValueMap[3].priority);
+
+  static const int kBackgroundNiceValue =
+      kThreadPriorityToNiceValueMap[0].nice_value;
+  static const int kNormalNiceValue =
+      kThreadPriorityToNiceValueMap[1].nice_value;
+  static const int kDisplayNiceValue =
+      kThreadPriorityToNiceValueMap[2].nice_value;
+  static const int kRealtimeAudioNiceValue =
+      kThreadPriorityToNiceValueMap[3].nice_value;
+
+  // The tests below assume the nice values specified in the map are within
+  // the range below (both ends exclusive).
+  static const int kHighestNiceValue = 19;
+  static const int kLowestNiceValue = -20;
+
+  EXPECT_GT(kHighestNiceValue, kBackgroundNiceValue);
+  EXPECT_GT(kBackgroundNiceValue, kNormalNiceValue);
+  EXPECT_GT(kNormalNiceValue, kDisplayNiceValue);
+  EXPECT_GT(kDisplayNiceValue, kRealtimeAudioNiceValue);
+  EXPECT_GT(kRealtimeAudioNiceValue, kLowestNiceValue);
+
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kHighestNiceValue));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kBackgroundNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kBackgroundNiceValue));
+  EXPECT_EQ(ThreadPriority::BACKGROUND,
+            NiceValueToThreadPriority(kNormalNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            NiceValueToThreadPriority(kNormalNiceValue));
+  EXPECT_EQ(ThreadPriority::NORMAL,
+            NiceValueToThreadPriority(kDisplayNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            NiceValueToThreadPriority(kDisplayNiceValue));
+  EXPECT_EQ(ThreadPriority::DISPLAY,
+            NiceValueToThreadPriority(kRealtimeAudioNiceValue + 1));
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            NiceValueToThreadPriority(kRealtimeAudioNiceValue));
+  EXPECT_EQ(ThreadPriority::REALTIME_AUDIO,
+            NiceValueToThreadPriority(kLowestNiceValue));
+}
+#endif
+
 }  // namespace base
diff --git a/base/threading/post_task_and_reply_impl.cc b/base/threading/post_task_and_reply_impl.cc
index 80ca520..c906866 100644
--- a/base/threading/post_task_and_reply_impl.cc
+++ b/base/threading/post_task_and_reply_impl.cc
@@ -7,7 +7,7 @@
 #include "base/bind.h"
 #include "base/location.h"
 #include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
diff --git a/base/threading/sequenced_task_runner_handle.cc b/base/threading/sequenced_task_runner_handle.cc
new file mode 100644
index 0000000..2c3af32
--- /dev/null
+++ b/base/threading/sequenced_task_runner_handle.cc
@@ -0,0 +1,69 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/sequenced_task_runner_handle.h"
+
+#include <utility>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/sequenced_task_runner.h"
+#include "base/threading/sequenced_worker_pool.h"
+#include "base/threading/thread_local.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+namespace base {
+
+namespace {
+
+base::LazyInstance<base::ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
+    lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// static
+scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
+  // Return the registered SequencedTaskRunner, if any.
+  const SequencedTaskRunnerHandle* handle = lazy_tls_ptr.Pointer()->Get();
+  if (handle) {
+    // Various modes of setting SequencedTaskRunnerHandle don't combine.
+    DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
+    DCHECK(!SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread());
+    return handle->task_runner_;
+  }
+
+  // Return the SequencedTaskRunner obtained from SequencedWorkerPool, if any.
+  scoped_refptr<base::SequencedTaskRunner> task_runner =
+      SequencedWorkerPool::GetSequencedTaskRunnerForCurrentThread();
+  if (task_runner) {
+    DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
+    return task_runner;
+  }
+
+  // Return the SingleThreadTaskRunner for the current thread otherwise.
+  return base::ThreadTaskRunnerHandle::Get();
+}
+
+// static
+bool SequencedTaskRunnerHandle::IsSet() {
+  return lazy_tls_ptr.Pointer()->Get() ||
+         SequencedWorkerPool::GetWorkerPoolForCurrentThread() ||
+         base::ThreadTaskRunnerHandle::IsSet();
+}
+
+SequencedTaskRunnerHandle::SequencedTaskRunnerHandle(
+    scoped_refptr<SequencedTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
+  DCHECK(!SequencedTaskRunnerHandle::IsSet());
+  lazy_tls_ptr.Pointer()->Set(this);
+}
+
+SequencedTaskRunnerHandle::~SequencedTaskRunnerHandle() {
+  DCHECK(task_runner_->RunsTasksOnCurrentThread());
+  DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
+  lazy_tls_ptr.Pointer()->Set(nullptr);
+}
+
+}  // namespace base
diff --git a/base/threading/sequenced_task_runner_handle.h b/base/threading/sequenced_task_runner_handle.h
new file mode 100644
index 0000000..e6da18d
--- /dev/null
+++ b/base/threading/sequenced_task_runner_handle.h
@@ -0,0 +1,46 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
+#define BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+class SequencedTaskRunner;
+
+class BASE_EXPORT SequencedTaskRunnerHandle {
+ public:
+  // Returns a SequencedTaskRunner which guarantees that posted tasks will only
+  // run after the current task is finished and will satisfy a SequenceChecker.
+  // It should only be called if IsSet() returns true (see the comment there for
+  // the requirements).
+  static scoped_refptr<SequencedTaskRunner> Get();
+
+  // Returns true if one of the following conditions is fulfilled:
+  // a) A SequencedTaskRunner has been assigned to the current thread by
+  //    instantiating a SequencedTaskRunnerHandle.
+  // b) The current thread has a ThreadTaskRunnerHandle (which includes any
+  //    thread that has a MessageLoop associated with it), or
+  // c) The current thread is a worker thread belonging to a
+  //    SequencedWorkerPool.
+  static bool IsSet();
+
+  // Binds |task_runner| to the current thread.
+  explicit SequencedTaskRunnerHandle(
+      scoped_refptr<SequencedTaskRunner> task_runner);
+  ~SequencedTaskRunnerHandle();
+
+ private:
+  scoped_refptr<SequencedTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(SequencedTaskRunnerHandle);
+};
+
+}  // namespace base
+
+#endif  // BASE_THREADING_SEQUENCED_TASK_RUNNER_HANDLE_H_
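
A minimal usage sketch against the header above, assuming the calling sequence
already satisfies IsSet() (for example, a thread with a MessageLoop or a
SequencedWorkerPool worker); ReplyOnOriginalSequence is a hypothetical helper.

    #include "base/callback.h"
    #include "base/location.h"
    #include "base/logging.h"
    #include "base/memory/ref_counted.h"
    #include "base/sequenced_task_runner.h"
    #include "base/threading/sequenced_task_runner_handle.h"

    // Captures the current sequence's runner so |reply| can be posted back to
    // it later from any thread.
    void ReplyOnOriginalSequence(const base::Closure& reply) {
      DCHECK(base::SequencedTaskRunnerHandle::IsSet());
      scoped_refptr<base::SequencedTaskRunner> runner =
          base::SequencedTaskRunnerHandle::Get();
      // |runner| resolves, in order: an explicit SequencedTaskRunnerHandle, the
      // SequencedWorkerPool sequence of the current worker, or the thread's own
      // SingleThreadTaskRunner.
      runner->PostTask(FROM_HERE, reply);
    }
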
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index 081a49f..57961b5 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -8,6 +8,7 @@
 
 #include <list>
 #include <map>
+#include <memory>
 #include <set>
 #include <utility>
 #include <vector>
@@ -19,17 +20,18 @@
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/simple_thread.h"
 #include "base/threading/thread_local.h"
 #include "base/threading/thread_restrictions.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/trace_event.h"
 #include "base/tracked_objects.h"
 #include "build/build_config.h"
@@ -98,7 +100,7 @@
 class SequencedWorkerPoolTaskRunner : public TaskRunner {
  public:
   SequencedWorkerPoolTaskRunner(
-      const scoped_refptr<SequencedWorkerPool>& pool,
+      scoped_refptr<SequencedWorkerPool> pool,
       SequencedWorkerPool::WorkerShutdown shutdown_behavior);
 
   // TaskRunner implementation
@@ -118,11 +120,9 @@
 };
 
 SequencedWorkerPoolTaskRunner::SequencedWorkerPoolTaskRunner(
-    const scoped_refptr<SequencedWorkerPool>& pool,
+    scoped_refptr<SequencedWorkerPool> pool,
     SequencedWorkerPool::WorkerShutdown shutdown_behavior)
-    : pool_(pool),
-      shutdown_behavior_(shutdown_behavior) {
-}
+    : pool_(std::move(pool)), shutdown_behavior_(shutdown_behavior) {}
 
 SequencedWorkerPoolTaskRunner::~SequencedWorkerPoolTaskRunner() {
 }
@@ -131,7 +131,7 @@
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  if (delay == TimeDelta()) {
+  if (delay.is_zero()) {
     return pool_->PostWorkerTaskWithShutdownBehavior(
         from_here, task, shutdown_behavior_);
   }
@@ -150,7 +150,7 @@
 class SequencedWorkerPoolSequencedTaskRunner : public SequencedTaskRunner {
  public:
   SequencedWorkerPoolSequencedTaskRunner(
-      const scoped_refptr<SequencedWorkerPool>& pool,
+      scoped_refptr<SequencedWorkerPool> pool,
       SequencedWorkerPool::SequenceToken token,
       SequencedWorkerPool::WorkerShutdown shutdown_behavior);
 
@@ -178,13 +178,12 @@
 };
 
 SequencedWorkerPoolSequencedTaskRunner::SequencedWorkerPoolSequencedTaskRunner(
-    const scoped_refptr<SequencedWorkerPool>& pool,
+    scoped_refptr<SequencedWorkerPool> pool,
     SequencedWorkerPool::SequenceToken token,
     SequencedWorkerPool::WorkerShutdown shutdown_behavior)
-    : pool_(pool),
+    : pool_(std::move(pool)),
       token_(token),
-      shutdown_behavior_(shutdown_behavior) {
-}
+      shutdown_behavior_(shutdown_behavior) {}
 
 SequencedWorkerPoolSequencedTaskRunner::
 ~SequencedWorkerPoolSequencedTaskRunner() {
@@ -194,7 +193,7 @@
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  if (delay == TimeDelta()) {
+  if (delay.is_zero()) {
     return pool_->PostSequencedWorkerTaskWithShutdownBehavior(
         token_, from_here, task, shutdown_behavior_);
   }
@@ -230,7 +229,7 @@
  public:
   // Hold a (cyclic) ref to |worker_pool|, since we want to keep it
   // around as long as we are running.
-  Worker(const scoped_refptr<SequencedWorkerPool>& worker_pool,
+  Worker(scoped_refptr<SequencedWorkerPool> worker_pool,
          int thread_number,
          const std::string& thread_name_prefix);
   ~Worker() override;
@@ -448,7 +447,7 @@
   // Owning pointers to all threads we've created so far, indexed by
   // ID. Since we lazily create threads, this may be less than
   // max_threads_ and will be initially empty.
-  using ThreadMap = std::map<PlatformThreadId, scoped_ptr<Worker>>;
+  using ThreadMap = std::map<PlatformThreadId, std::unique_ptr<Worker>>;
   ThreadMap threads_;
 
   // Set to true when we're in the process of creating another thread.
@@ -504,11 +503,11 @@
 // Worker definitions ---------------------------------------------------------
 
 SequencedWorkerPool::Worker::Worker(
-    const scoped_refptr<SequencedWorkerPool>& worker_pool,
+    scoped_refptr<SequencedWorkerPool> worker_pool,
     int thread_number,
     const std::string& prefix)
     : SimpleThread(prefix + StringPrintf("Worker%d", thread_number)),
-      worker_pool_(worker_pool),
+      worker_pool_(std::move(worker_pool)),
       task_shutdown_behavior_(BLOCK_SHUTDOWN),
       is_processing_task_(false) {
   Start();
@@ -612,7 +611,7 @@
     const tracked_objects::Location& from_here,
     const Closure& task,
     TimeDelta delay) {
-  DCHECK(delay == TimeDelta() || shutdown_behavior == SKIP_ON_SHUTDOWN);
+  DCHECK(delay.is_zero() || shutdown_behavior == SKIP_ON_SHUTDOWN);
   SequencedTask sequenced(from_here);
   sequenced.sequence_token_id = sequence_token.id_;
   sequenced.shutdown_behavior = shutdown_behavior;
@@ -789,7 +788,7 @@
     DCHECK(thread_being_created_);
     thread_being_created_ = false;
     auto result = threads_.insert(
-        std::make_pair(this_worker->tid(), make_scoped_ptr(this_worker)));
+        std::make_pair(this_worker->tid(), WrapUnique(this_worker)));
     DCHECK(result.second);
 
     while (true) {
@@ -812,6 +811,8 @@
             TRACE_EVENT_FLAG_FLOW_IN,
             "src_file", task.posted_from.file_name(),
             "src_func", task.posted_from.function_name());
+        TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION task_event(
+            task.posted_from.file_name());
         int new_thread_id = WillRunWorkerTask(task);
         {
           AutoUnlock unlock(lock_);
@@ -1316,7 +1317,7 @@
     const Closure& task,
     TimeDelta delay) {
   WorkerShutdown shutdown_behavior =
-      delay == TimeDelta() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
+      delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
   return inner_->PostTask(NULL, SequenceToken(), shutdown_behavior,
                           from_here, task, delay);
 }
@@ -1343,7 +1344,7 @@
     const Closure& task,
     TimeDelta delay) {
   WorkerShutdown shutdown_behavior =
-      delay == TimeDelta() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
+      delay.is_zero() ? BLOCK_SHUTDOWN : SKIP_ON_SHUTDOWN;
   return inner_->PostTask(NULL, sequence_token, shutdown_behavior,
                           from_here, task, delay);
 }
diff --git a/base/threading/sequenced_worker_pool.h b/base/threading/sequenced_worker_pool.h
index ba0e444..cbec395 100644
--- a/base/threading/sequenced_worker_pool.h
+++ b/base/threading/sequenced_worker_pool.h
@@ -8,13 +8,13 @@
 #include <stddef.h>
 
 #include <cstddef>
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/callback_forward.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/single_thread_task_runner.h"
 #include "base/task_runner.h"
 
@@ -374,7 +374,7 @@
 
   // Avoid pulling in too many headers by putting (almost) everything
   // into |inner_|.
-  const scoped_ptr<Inner> inner_;
+  const std::unique_ptr<Inner> inner_;
 
   DISALLOW_COPY_AND_ASSIGN(SequencedWorkerPool);
 };
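
The constructor signatures above switch from const scoped_refptr<T>& to a
by-value scoped_refptr<T> plus std::move, so a caller passing an rvalue avoids
an extra AddRef/Release pair. A minimal sketch of the idiom, with hypothetical
Pool and PoolTaskRunner types standing in for the real classes:

    #include <utility>

    #include "base/memory/ref_counted.h"

    class Pool : public base::RefCountedThreadSafe<Pool> {
     private:
      friend class base::RefCountedThreadSafe<Pool>;
      ~Pool() {}
    };

    class PoolTaskRunner {
     public:
      // By-value parameter: an rvalue argument is moved in with no extra
      // AddRef/Release; an lvalue argument pays exactly one copy, the same cost
      // as a const& parameter that is then copied into the member.
      explicit PoolTaskRunner(scoped_refptr<Pool> pool)
          : pool_(std::move(pool)) {}

     private:
      scoped_refptr<Pool> pool_;
    };

    // Usage: the temporary is moved straight into the member.
    // PoolTaskRunner runner(make_scoped_refptr(new Pool));
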
diff --git a/base/threading/thread.cc b/base/threading/thread.cc
index 2a27608..b6fead6 100644
--- a/base/threading/thread.cc
+++ b/base/threading/thread.cc
@@ -103,8 +103,8 @@
     type = MessageLoop::TYPE_CUSTOM;
 
   message_loop_timer_slack_ = options.timer_slack;
-  scoped_ptr<MessageLoop> message_loop = MessageLoop::CreateUnbound(
-      type, options.message_pump_factory);
+  std::unique_ptr<MessageLoop> message_loop =
+      MessageLoop::CreateUnbound(type, options.message_pump_factory);
   message_loop_ = message_loop.get();
   start_event_.Reset();
 
@@ -225,13 +225,13 @@
 
   // Lazily initialize the message_loop so that it can run on this thread.
   DCHECK(message_loop_);
-  scoped_ptr<MessageLoop> message_loop(message_loop_);
+  std::unique_ptr<MessageLoop> message_loop(message_loop_);
   message_loop_->BindToCurrentThread();
   message_loop_->set_thread_name(name_);
   message_loop_->SetTimerSlack(message_loop_timer_slack_);
 
 #if defined(OS_WIN)
-  scoped_ptr<win::ScopedCOMInitializer> com_initializer;
+  std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
   if (com_status_ != NONE) {
     com_initializer.reset((com_status_ == STA) ?
         new win::ScopedCOMInitializer() :
diff --git a/base/threading/thread.h b/base/threading/thread.h
index ec19722..c9a77d7 100644
--- a/base/threading/thread.h
+++ b/base/threading/thread.h
@@ -7,12 +7,12 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/callback.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/message_loop/timer_slack.h"
 #include "base/single_thread_task_runner.h"
@@ -41,7 +41,7 @@
 class BASE_EXPORT Thread : PlatformThread::Delegate {
  public:
   struct BASE_EXPORT Options {
-    typedef Callback<scoped_ptr<MessagePump>()> MessagePumpFactory;
+    typedef Callback<std::unique_ptr<MessagePump>()> MessagePumpFactory;
 
     Options();
     Options(MessageLoop::Type type, size_t size);
diff --git a/base/threading/thread_checker_unittest.cc b/base/threading/thread_checker_unittest.cc
index fd98f76..bc5b1e4 100644
--- a/base/threading/thread_checker_unittest.cc
+++ b/base/threading/thread_checker_unittest.cc
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/threading/thread_checker.h"
+
+#include <memory>
+
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/threading/simple_thread.h"
-#include "base/threading/thread_checker.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 // Duplicated from base/threading/thread_checker.h so that we can be
@@ -72,7 +74,7 @@
   void Run() override { thread_checker_class_.reset(); }
 
  private:
-  scoped_ptr<ThreadCheckerClass> thread_checker_class_;
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class_;
 
   DISALLOW_COPY_AND_ASSIGN(DeleteThreadCheckerClassOnThread);
 };
@@ -80,7 +82,7 @@
 }  // namespace
 
 TEST(ThreadCheckerTest, CallsAllowedOnSameThread) {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // Verify that DoStuff doesn't assert.
@@ -91,7 +93,7 @@
 }
 
 TEST(ThreadCheckerTest, DestructorAllowedOnDifferentThread) {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // Verify that the destructor doesn't assert
@@ -104,7 +106,7 @@
 }
 
 TEST(ThreadCheckerTest, DetachFromThread) {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // Verify that DoStuff doesn't assert when called on a different thread after
@@ -119,7 +121,7 @@
 #if GTEST_HAS_DEATH_TEST || !ENABLE_THREAD_CHECKER
 
 void ThreadCheckerClass::MethodOnDifferentThreadImpl() {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // DoStuff should assert in debug builds only when called on a
@@ -143,7 +145,7 @@
 #endif  // ENABLE_THREAD_CHECKER
 
 void ThreadCheckerClass::DetachThenCallFromDifferentThreadImpl() {
-  scoped_ptr<ThreadCheckerClass> thread_checker_class(
+  std::unique_ptr<ThreadCheckerClass> thread_checker_class(
       new ThreadCheckerClass);
 
   // DoStuff doesn't assert when called on a different thread
diff --git a/base/threading/thread_collision_warner_unittest.cc b/base/threading/thread_collision_warner_unittest.cc
index 79ca7e2..71447ef 100644
--- a/base/threading/thread_collision_warner_unittest.cc
+++ b/base/threading/thread_collision_warner_unittest.cc
@@ -2,13 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/threading/thread_collision_warner.h"
+
+#include <memory>
+
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/simple_thread.h"
-#include "base/threading/thread_collision_warner.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 // '' : local class member function does not have a body
@@ -19,7 +21,7 @@
 
 // Would cause a memory leak otherwise.
 #undef DFAKE_MUTEX
-#define DFAKE_MUTEX(obj) scoped_ptr<base::AsserterBase> obj
+#define DFAKE_MUTEX(obj) std::unique_ptr<base::AsserterBase> obj
 
 // In Release, we expect the AsserterBase::warn() to not happen.
 #define EXPECT_NDEBUG_FALSE_DEBUG_TRUE EXPECT_FALSE
diff --git a/base/threading/thread_id_name_manager.cc b/base/threading/thread_id_name_manager.cc
index 56cfa27..107e0dc 100644
--- a/base/threading/thread_id_name_manager.cc
+++ b/base/threading/thread_id_name_manager.cc
@@ -10,6 +10,7 @@
 #include "base/logging.h"
 #include "base/memory/singleton.h"
 #include "base/strings/string_util.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 
 namespace base {
 namespace {
@@ -50,27 +51,37 @@
 
 void ThreadIdNameManager::SetName(PlatformThreadId id,
                                   const std::string& name) {
-  AutoLock locked(lock_);
-  NameToInternedNameMap::iterator iter = name_to_interned_name_.find(name);
   std::string* leaked_str = NULL;
-  if (iter != name_to_interned_name_.end()) {
-    leaked_str = iter->second;
-  } else {
-    leaked_str = new std::string(name);
-    name_to_interned_name_[name] = leaked_str;
+  {
+    AutoLock locked(lock_);
+    NameToInternedNameMap::iterator iter = name_to_interned_name_.find(name);
+    if (iter != name_to_interned_name_.end()) {
+      leaked_str = iter->second;
+    } else {
+      leaked_str = new std::string(name);
+      name_to_interned_name_[name] = leaked_str;
+    }
+
+    ThreadIdToHandleMap::iterator id_to_handle_iter =
+        thread_id_to_handle_.find(id);
+
+    // The main thread of a process will not be created as a Thread object,
+    // which means there is no PlatformThreadHandle registered.
+    if (id_to_handle_iter == thread_id_to_handle_.end()) {
+      main_process_name_ = leaked_str;
+      main_process_id_ = id;
+      return;
+    }
+    thread_handle_to_interned_name_[id_to_handle_iter->second] = leaked_str;
   }
 
-  ThreadIdToHandleMap::iterator id_to_handle_iter =
-      thread_id_to_handle_.find(id);
-
-  // The main thread of a process will not be created as a Thread object which
-  // means there is no PlatformThreadHandler registered.
-  if (id_to_handle_iter == thread_id_to_handle_.end()) {
-    main_process_name_ = leaked_str;
-    main_process_id_ = id;
-    return;
-  }
-  thread_handle_to_interned_name_[id_to_handle_iter->second] = leaked_str;
+  // Add the leaked thread name to the heap profiler's context tracker. The
+  // name added is valid for the lifetime of the process.
+  // AllocationContextTracker cannot call GetName() (which takes the lock)
+  // during its first allocation, because that allocation can happen inside
+  // ThreadIdNameManager itself while the lock is held, causing a deadlock.
+  trace_event::AllocationContextTracker::SetCurrentThreadName(
+      leaked_str->c_str());
 }
 
 const char* ThreadIdNameManager::GetName(PlatformThreadId id) {
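
The restructured SetName() narrows the locked region and makes the profiler
call only after the lock is released, because the tracker's first allocation
may re-enter this class. A hedged sketch of the same pattern, using a
hypothetical NameRegistry rather than the real ThreadIdNameManager:

    #include <map>
    #include <string>

    #include "base/callback.h"
    #include "base/synchronization/lock.h"

    class NameRegistry {
     public:
      explicit NameRegistry(const base::Callback<void(const char*)>& observer)
          : observer_(observer) {}

      void SetName(int id, const std::string& name) {
        const std::string* interned = nullptr;
        {
          // Only the map bookkeeping runs under the lock.
          base::AutoLock hold(lock_);
          std::string& slot = interned_strings_[name];
          if (slot.empty())
            slot = name;
          interned = &slot;
          ids_[id] = interned;
        }
        // Outside the lock: the observer may allocate and re-enter this class
        // without deadlocking on |lock_|.
        observer_.Run(interned->c_str());
      }

     private:
      base::Lock lock_;
      std::map<std::string, std::string> interned_strings_;
      std::map<int, const std::string*> ids_;
      base::Callback<void(const char*)> observer_;
    };
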
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
index 5c1fb13..d8e3cb1 100644
--- a/base/threading/thread_restrictions.h
+++ b/base/threading/thread_restrictions.h
@@ -37,8 +37,8 @@
 class BrowserGpuChannelHostFactory;
 class BrowserGpuMemoryBufferManager;
 class BrowserShutdownProfileDumper;
+class BrowserSurfaceViewManager;
 class BrowserTestBase;
-class GpuChannelHost;
 class NestedMessagePumpAndroid;
 class ScopedAllowWaitForAndroidLayoutTests;
 class ScopedAllowWaitForDebugURL;
@@ -56,6 +56,9 @@
 namespace gles2 {
 class CommandBufferClientImpl;
 }
+namespace gpu {
+class GpuChannelHost;
+}
 namespace mojo {
 namespace common {
 class MessagePumpMojo;
@@ -177,9 +180,9 @@
 #else
   // Inline the empty definitions of these functions so that they can be
   // compiled out.
-  static bool SetIOAllowed(bool /* allowed */) { return true; }
+  static bool SetIOAllowed(bool) { return true; }
   static void AssertIOAllowed() {}
-  static bool SetSingletonAllowed(bool /* allowed */) { return true; }
+  static bool SetSingletonAllowed(bool) { return true; }
   static void AssertSingletonAllowed() {}
   static void DisallowWaiting() {}
   static void AssertWaitAllowed() {}
@@ -189,6 +192,7 @@
   // DO NOT ADD ANY OTHER FRIEND STATEMENTS, talk to jam or brettw first.
   // BEGIN ALLOWED USAGE.
   friend class content::BrowserShutdownProfileDumper;
+  friend class content::BrowserSurfaceViewManager;
   friend class content::BrowserTestBase;
   friend class content::NestedMessagePumpAndroid;
   friend class content::ScopedAllowWaitForAndroidLayoutTests;
@@ -221,11 +225,11 @@
       content::BrowserGpuChannelHostFactory;      // http://crbug.com/125248
   friend class
       content::BrowserGpuMemoryBufferManager;     // http://crbug.com/420368
-  friend class content::GpuChannelHost;           // http://crbug.com/125264
   friend class content::TextInputClientMac;       // http://crbug.com/121917
   friend class dbus::Bus;                         // http://crbug.com/125222
   friend class disk_cache::BackendImpl;           // http://crbug.com/74623
   friend class disk_cache::InFlightIO;            // http://crbug.com/74623
+  friend class gpu::GpuChannelHost;               // http://crbug.com/125264
   friend class net::internal::AddressTrackerLinux;  // http://crbug.com/125097
   friend class net::NetworkChangeNotifierMac;     // http://crbug.com/125097
   friend class ::BrowserProcessImpl;              // http://crbug.com/125207
@@ -239,7 +243,7 @@
 #if ENABLE_THREAD_RESTRICTIONS
   static bool SetWaitAllowed(bool allowed);
 #else
-  static bool SetWaitAllowed(bool /* allowed */) { return true; }
+  static bool SetWaitAllowed(bool) { return true; }
 #endif
 
   // Constructing a ScopedAllowWait temporarily allows waiting on the current
diff --git a/base/thread_task_runner_handle.cc b/base/threading/thread_task_runner_handle.cc
similarity index 62%
rename from base/thread_task_runner_handle.cc
rename to base/threading/thread_task_runner_handle.cc
index ee337b3..1b7c13a 100644
--- a/base/thread_task_runner_handle.cc
+++ b/base/threading/thread_task_runner_handle.cc
@@ -1,18 +1,22 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_task_runner_handle.h"
+
+#include <utility>
 
 #include "base/lazy_instance.h"
+#include "base/logging.h"
 #include "base/single_thread_task_runner.h"
+#include "base/threading/sequenced_task_runner_handle.h"
 #include "base/threading/thread_local.h"
 
 namespace base {
 
 namespace {
 
-base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle> >::Leaky
+base::LazyInstance<base::ThreadLocalPointer<ThreadTaskRunnerHandle>>::Leaky
     lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
 
 }  // namespace
@@ -26,21 +30,23 @@
 
 // static
 bool ThreadTaskRunnerHandle::IsSet() {
-  return lazy_tls_ptr.Pointer()->Get() != NULL;
+  return !!lazy_tls_ptr.Pointer()->Get();
 }
 
 ThreadTaskRunnerHandle::ThreadTaskRunnerHandle(
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner)
-    : task_runner_(task_runner) {
+    scoped_refptr<SingleThreadTaskRunner> task_runner)
+    : task_runner_(std::move(task_runner)) {
   DCHECK(task_runner_->BelongsToCurrentThread());
-  DCHECK(!lazy_tls_ptr.Pointer()->Get());
+  // No SequencedTaskRunnerHandle (which includes ThreadTaskRunnerHandles)
+  // should already be set for this thread.
+  DCHECK(!SequencedTaskRunnerHandle::IsSet());
   lazy_tls_ptr.Pointer()->Set(this);
 }
 
 ThreadTaskRunnerHandle::~ThreadTaskRunnerHandle() {
   DCHECK(task_runner_->BelongsToCurrentThread());
   DCHECK_EQ(lazy_tls_ptr.Pointer()->Get(), this);
-  lazy_tls_ptr.Pointer()->Set(NULL);
+  lazy_tls_ptr.Pointer()->Set(nullptr);
 }
 
 }  // namespace base
diff --git a/base/thread_task_runner_handle.h b/base/threading/thread_task_runner_handle.h
similarity index 76%
rename from base/thread_task_runner_handle.h
rename to base/threading/thread_task_runner_handle.h
index 197669e..72ce49e 100644
--- a/base/thread_task_runner_handle.h
+++ b/base/threading/thread_task_runner_handle.h
@@ -1,11 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef BASE_THREAD_TASK_RUNNER_HANDLE_H_
-#define BASE_THREAD_TASK_RUNNER_HANDLE_H_
+#ifndef BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
+#define BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
 
 #include "base/base_export.h"
+#include "base/macros.h"
 #include "base/memory/ref_counted.h"
 
 namespace base {
@@ -29,13 +30,15 @@
   // Binds |task_runner| to the current thread. |task_runner| must belong
   // to the current thread for this to succeed.
   explicit ThreadTaskRunnerHandle(
-      const scoped_refptr<SingleThreadTaskRunner>& task_runner);
+      scoped_refptr<SingleThreadTaskRunner> task_runner);
   ~ThreadTaskRunnerHandle();
 
  private:
   scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(ThreadTaskRunnerHandle);
 };
 
 }  // namespace base
 
-#endif  // BASE_THREAD_TASK_RUNNER_HANDLE_H_
+#endif  // BASE_THREADING_THREAD_TASK_RUNNER_HANDLE_H_
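
Callers now include the header from base/threading/; a minimal usage sketch,
assuming the current thread runs a MessageLoop so a ThreadTaskRunnerHandle is
bound (DoLater is a hypothetical helper):

    #include "base/callback.h"
    #include "base/location.h"
    #include "base/single_thread_task_runner.h"
    #include "base/threading/thread_task_runner_handle.h"
    #include "base/time/time.h"

    // Posts |work| back to the current thread's message loop after a short delay.
    void DoLater(const base::Closure& work) {
      base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
          FROM_HERE, work, base::TimeDelta::FromMilliseconds(100));
    }
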
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
index 20d031f..bc27088 100644
--- a/base/threading/thread_unittest.cc
+++ b/base/threading/thread_unittest.cc
@@ -286,7 +286,7 @@
 
 TEST_F(ThreadTest, ThreadNotStarted) {
   Thread a("Inert");
-  EXPECT_EQ(nullptr, a.task_runner());
+  EXPECT_FALSE(a.task_runner());
 }
 
 TEST_F(ThreadTest, MultipleWaitUntilThreadStarted) {
diff --git a/base/threading/worker_pool_posix.cc b/base/threading/worker_pool_posix.cc
index e6b1d64..17c3342 100644
--- a/base/threading/worker_pool_posix.cc
+++ b/base/threading/worker_pool_posix.cc
@@ -53,7 +53,7 @@
 
 void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
                               const base::Closure& task,
-                              bool /* task_is_slow */) {
+                              bool /*task_is_slow*/) {
   pool_->PostTask(from_here, task);
 }
 
@@ -86,9 +86,7 @@
     PendingTask pending_task = pool_->WaitForTask();
     if (pending_task.task.is_null())
       break;
-    TRACE_EVENT2("toplevel", "WorkerThread::ThreadMain::Run",
-        "src_file", pending_task.posted_from.file_name(),
-        "src_func", pending_task.posted_from.function_name());
+    TRACE_TASK_EXECUTION("WorkerThread::ThreadMain::Run", pending_task);
 
     tracked_objects::TaskStopwatch stopwatch;
     stopwatch.Start();
diff --git a/base/threading/worker_pool_posix.h b/base/threading/worker_pool_posix.h
index f8971ac..628e2b6 100644
--- a/base/threading/worker_pool_posix.h
+++ b/base/threading/worker_pool_posix.h
@@ -24,6 +24,7 @@
 #ifndef BASE_THREADING_WORKER_POOL_POSIX_H_
 #define BASE_THREADING_WORKER_POOL_POSIX_H_
 
+#include <memory>
 #include <queue>
 #include <string>
 
@@ -31,7 +32,6 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/pending_task.h"
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
@@ -88,7 +88,7 @@
   bool terminated_;
   // Only used for tests to ensure correct thread ordering.  It will always be
   // NULL in non-test code.
-  scoped_ptr<ConditionVariable> num_idle_threads_cv_;
+  std::unique_ptr<ConditionVariable> num_idle_threads_cv_;
 
   DISALLOW_COPY_AND_ASSIGN(PosixDynamicThreadPool);
 };
diff --git a/base/time/time.h b/base/time/time.h
index 066d910..399ec82 100644
--- a/base/time/time.h
+++ b/base/time/time.h
@@ -74,12 +74,12 @@
 // For FILETIME in FromFileTime, until it moves to a new converter class.
 // See TODO(iyengar) below.
 #include <windows.h>
-
 #include "base/gtest_prod_util.h"
 #endif
 
 namespace base {
 
+class PlatformThreadHandle;
 class TimeDelta;
 
 // The functions in the time_internal namespace are meant to be used only by the
@@ -106,14 +106,14 @@
   }
 
   // Converts units of time to TimeDeltas.
-  static TimeDelta FromDays(int days);
-  static TimeDelta FromHours(int hours);
-  static TimeDelta FromMinutes(int minutes);
-  static TimeDelta FromSeconds(int64_t secs);
-  static TimeDelta FromMilliseconds(int64_t ms);
-  static TimeDelta FromSecondsD(double secs);
-  static TimeDelta FromMillisecondsD(double ms);
-  static TimeDelta FromMicroseconds(int64_t us);
+  static constexpr TimeDelta FromDays(int days);
+  static constexpr TimeDelta FromHours(int hours);
+  static constexpr TimeDelta FromMinutes(int minutes);
+  static constexpr TimeDelta FromSeconds(int64_t secs);
+  static constexpr TimeDelta FromMilliseconds(int64_t ms);
+  static constexpr TimeDelta FromSecondsD(double secs);
+  static constexpr TimeDelta FromMillisecondsD(double ms);
+  static constexpr TimeDelta FromMicroseconds(int64_t us);
 #if defined(OS_WIN)
   static TimeDelta FromQPCValue(LONGLONG qpc_value);
 #endif
@@ -222,22 +222,22 @@
   }
 
   // Comparison operators.
-  bool operator==(TimeDelta other) const {
+  constexpr bool operator==(TimeDelta other) const {
     return delta_ == other.delta_;
   }
-  bool operator!=(TimeDelta other) const {
+  constexpr bool operator!=(TimeDelta other) const {
     return delta_ != other.delta_;
   }
-  bool operator<(TimeDelta other) const {
+  constexpr bool operator<(TimeDelta other) const {
     return delta_ < other.delta_;
   }
-  bool operator<=(TimeDelta other) const {
+  constexpr bool operator<=(TimeDelta other) const {
     return delta_ <= other.delta_;
   }
-  bool operator>(TimeDelta other) const {
+  constexpr bool operator>(TimeDelta other) const {
     return delta_ > other.delta_;
   }
-  bool operator>=(TimeDelta other) const {
+  constexpr bool operator>=(TimeDelta other) const {
     return delta_ >= other.delta_;
   }
 
@@ -248,10 +248,14 @@
   // Constructs a delta given the duration in microseconds. This is private
   // to avoid confusion by callers with an integer constructor. Use
   // FromSeconds, FromMilliseconds, etc. instead.
-  explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
+  constexpr explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {}
 
   // Private method to build a delta from a double.
-  static TimeDelta FromDouble(double value);
+  static constexpr TimeDelta FromDouble(double value);
+
+  // Private method to build a delta from the product of a user-provided value
+  // and a known-positive value.
+  static constexpr TimeDelta FromProduct(int64_t value, int64_t positive_value);
 
   // Delta in microseconds.
   int64_t delta_;
@@ -575,63 +579,72 @@
                                  Time* parsed_time);
 };
 
-// Inline the TimeDelta factory methods, for fast TimeDelta construction.
-
 // static
-inline TimeDelta TimeDelta::FromDays(int days) {
-  if (days == std::numeric_limits<int>::max())
-    return Max();
-  return TimeDelta(days * Time::kMicrosecondsPerDay);
+constexpr TimeDelta TimeDelta::FromDays(int days) {
+  return days == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(days * Time::kMicrosecondsPerDay);
 }
 
 // static
-inline TimeDelta TimeDelta::FromHours(int hours) {
-  if (hours == std::numeric_limits<int>::max())
-    return Max();
-  return TimeDelta(hours * Time::kMicrosecondsPerHour);
+constexpr TimeDelta TimeDelta::FromHours(int hours) {
+  return hours == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(hours * Time::kMicrosecondsPerHour);
 }
 
 // static
-inline TimeDelta TimeDelta::FromMinutes(int minutes) {
-  if (minutes == std::numeric_limits<int>::max())
-    return Max();
-  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+constexpr TimeDelta TimeDelta::FromMinutes(int minutes) {
+  return minutes == std::numeric_limits<int>::max()
+             ? Max()
+             : TimeDelta(minutes * Time::kMicrosecondsPerMinute);
 }
 
 // static
-inline TimeDelta TimeDelta::FromSeconds(int64_t secs) {
-  return TimeDelta(secs) * Time::kMicrosecondsPerSecond;
+constexpr TimeDelta TimeDelta::FromSeconds(int64_t secs) {
+  return FromProduct(secs, Time::kMicrosecondsPerSecond);
 }
 
 // static
-inline TimeDelta TimeDelta::FromMilliseconds(int64_t ms) {
-  return TimeDelta(ms) * Time::kMicrosecondsPerMillisecond;
+constexpr TimeDelta TimeDelta::FromMilliseconds(int64_t ms) {
+  return FromProduct(ms, Time::kMicrosecondsPerMillisecond);
 }
 
 // static
-inline TimeDelta TimeDelta::FromSecondsD(double secs) {
+constexpr TimeDelta TimeDelta::FromSecondsD(double secs) {
   return FromDouble(secs * Time::kMicrosecondsPerSecond);
 }
 
 // static
-inline TimeDelta TimeDelta::FromMillisecondsD(double ms) {
+constexpr TimeDelta TimeDelta::FromMillisecondsD(double ms) {
   return FromDouble(ms * Time::kMicrosecondsPerMillisecond);
 }
 
 // static
-inline TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
+constexpr TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
   return TimeDelta(us);
 }
 
 // static
-inline TimeDelta TimeDelta::FromDouble(double value) {
-  double max_magnitude = std::numeric_limits<int64_t>::max();
-  TimeDelta delta = TimeDelta(static_cast<int64_t>(value));
-  if (value > max_magnitude)
-    delta = Max();
-  else if (value < -max_magnitude)
-    delta = -Max();
-  return delta;
+constexpr TimeDelta TimeDelta::FromDouble(double value) {
+  // TODO(crbug.com/612601): Use saturated_cast<int64_t>(value) once we sort out
+  // the Min() behavior.
+  return value > std::numeric_limits<int64_t>::max()
+             ? Max()
+             : value < -std::numeric_limits<int64_t>::max()
+                   ? -Max()
+                   : TimeDelta(static_cast<int64_t>(value));
+}
+
+// static
+constexpr TimeDelta TimeDelta::FromProduct(int64_t value,
+                                           int64_t positive_value) {
+  return (DCHECK(positive_value > 0),
+          value > std::numeric_limits<int64_t>::max() / positive_value
+              ? Max()
+              : value < -std::numeric_limits<int64_t>::max() / positive_value
+                    ? -Max()
+                    : TimeDelta(value * positive_value));
 }
 
 // For logging use only.
@@ -642,6 +655,15 @@
 // Represents monotonically non-decreasing clock time.
 class BASE_EXPORT TimeTicks : public time_internal::TimeBase<TimeTicks> {
  public:
+  // The underlying clock used to generate new TimeTicks.
+  enum class Clock {
+    LINUX_CLOCK_MONOTONIC,
+    IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME,
+    MAC_MACH_ABSOLUTE_TIME,
+    WIN_QPC,
+    WIN_ROLLOVER_PROTECTED_TIME_GET_TIME
+  };
+
   TimeTicks() : TimeBase(0) {
   }
 
@@ -680,6 +702,11 @@
   TimeTicks SnappedToNextTick(TimeTicks tick_phase,
                               TimeDelta tick_interval) const;
 
+  // Returns an enum indicating the underlying clock being used to generate
+  // TimeTicks timestamps. This function should only be used for debugging and
+  // logging purposes.
+  static Clock GetClock();
+
 #if defined(OS_WIN)
  protected:
   typedef DWORD (*TickFunctionType)(void);
@@ -735,11 +762,18 @@
   // absolutely needed, call WaitUntilInitialized() before this method.
   static ThreadTicks Now();
 
+#if defined(OS_WIN)
+  // Similar to Now() above except this returns thread-specific CPU time for an
+  // arbitrary thread. All comments for the Now() method above apply to this
+  // method as well.
+  static ThreadTicks GetForThread(const PlatformThreadHandle& thread_handle);
+#endif
+
  private:
   friend class time_internal::TimeBase<ThreadTicks>;
 
-  // Please use Now() to create a new object. This is for internal use
-  // and testing.
+  // Please use Now() or GetForThread() to create a new object. This is for
+  // internal use and testing.
   explicit ThreadTicks(int64_t us) : TimeBase(us) {}
 
 #if defined(OS_WIN)
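
With the factory methods constexpr, fixed deltas can now be formed at compile
time; a small sketch using only the operators shown above (kPollInterval is a
hypothetical constant):

    #include "base/time/time.h"

    // Evaluated entirely at compile time; overflow in the integer factories
    // saturates via FromProduct() rather than wrapping.
    constexpr base::TimeDelta kPollInterval = base::TimeDelta::FromSeconds(30);
    static_assert(kPollInterval == base::TimeDelta::FromMilliseconds(30000),
                  "the integer factories agree at compile time");
    static_assert(
        base::TimeDelta::FromMinutes(2) > base::TimeDelta::FromSeconds(90),
        "the comparison operators are constexpr as well");
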
diff --git a/base/time/time_mac.cc b/base/time/time_mac.cc
index f0c7804..c23c491 100644
--- a/base/time/time_mac.cc
+++ b/base/time/time_mac.cc
@@ -92,9 +92,11 @@
   MACH_DCHECK(kr == KERN_SUCCESS, kr) << "thread_info";
 
   base::CheckedNumeric<int64_t> absolute_micros(
-      thread_info_data.user_time.seconds);
+      thread_info_data.user_time.seconds +
+      thread_info_data.system_time.seconds);
   absolute_micros *= base::Time::kMicrosecondsPerSecond;
-  absolute_micros += thread_info_data.user_time.microseconds;
+  absolute_micros += (thread_info_data.user_time.microseconds +
+                      thread_info_data.system_time.microseconds);
   return absolute_micros.ValueOrDie();
 #endif  // defined(OS_IOS)
 }
@@ -237,6 +239,15 @@
 }
 
 // static
+TimeTicks::Clock TimeTicks::GetClock() {
+#if defined(OS_IOS)
+  return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
+#else
+  return Clock::MAC_MACH_ABSOLUTE_TIME;
+#endif  // defined(OS_IOS)
+}
+
+// static
 ThreadTicks ThreadTicks::Now() {
   return ThreadTicks(ComputeThreadTicks());
 }
diff --git a/base/time/time_posix.cc b/base/time/time_posix.cc
index 4aadee6..32614bc 100644
--- a/base/time/time_posix.cc
+++ b/base/time/time_posix.cc
@@ -312,6 +312,11 @@
 }
 
 // static
+TimeTicks::Clock TimeTicks::GetClock() {
+  return Clock::LINUX_CLOCK_MONOTONIC;
+}
+
+// static
 bool TimeTicks::IsHighResolution() {
   return true;
 }
diff --git a/base/time/time_unittest.cc b/base/time/time_unittest.cc
index 8a6a7f5..25c6ca5 100644
--- a/base/time/time_unittest.cc
+++ b/base/time/time_unittest.cc
@@ -723,16 +723,21 @@
 }
 
 TEST(TimeDelta, FromAndIn) {
-  EXPECT_TRUE(TimeDelta::FromDays(2) == TimeDelta::FromHours(48));
-  EXPECT_TRUE(TimeDelta::FromHours(3) == TimeDelta::FromMinutes(180));
-  EXPECT_TRUE(TimeDelta::FromMinutes(2) == TimeDelta::FromSeconds(120));
-  EXPECT_TRUE(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000));
-  EXPECT_TRUE(TimeDelta::FromMilliseconds(2) ==
-              TimeDelta::FromMicroseconds(2000));
-  EXPECT_TRUE(TimeDelta::FromSecondsD(2.3) ==
-              TimeDelta::FromMilliseconds(2300));
-  EXPECT_TRUE(TimeDelta::FromMillisecondsD(2.5) ==
-              TimeDelta::FromMicroseconds(2500));
+  // static_assert also checks that the contained expression is a constant
+  // expression, meaning all its components are suitable for initializing global
+  // variables.
+  static_assert(TimeDelta::FromDays(2) == TimeDelta::FromHours(48), "");
+  static_assert(TimeDelta::FromHours(3) == TimeDelta::FromMinutes(180), "");
+  static_assert(TimeDelta::FromMinutes(2) == TimeDelta::FromSeconds(120), "");
+  static_assert(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000),
+                "");
+  static_assert(
+      TimeDelta::FromMilliseconds(2) == TimeDelta::FromMicroseconds(2000), "");
+  static_assert(
+      TimeDelta::FromSecondsD(2.3) == TimeDelta::FromMilliseconds(2300), "");
+  static_assert(
+      TimeDelta::FromMillisecondsD(2.5) == TimeDelta::FromMicroseconds(2500),
+      "");
   EXPECT_EQ(13, TimeDelta::FromDays(13).InDays());
   EXPECT_EQ(13, TimeDelta::FromHours(13).InHours());
   EXPECT_EQ(13, TimeDelta::FromMinutes(13).InMinutes());
diff --git a/base/timer/hi_res_timer_manager_unittest.cc b/base/timer/hi_res_timer_manager_unittest.cc
index 9416048..a0b0f93 100644
--- a/base/timer/hi_res_timer_manager_unittest.cc
+++ b/base/timer/hi_res_timer_manager_unittest.cc
@@ -4,9 +4,9 @@
 
 #include "base/timer/hi_res_timer_manager.h"
 
+#include <memory>
 #include <utility>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/power_monitor/power_monitor.h"
 #include "base/power_monitor/power_monitor_device_source.h"
@@ -22,9 +22,9 @@
   // Windows, which makes this test flaky if you run while the machine
   // goes in or out of AC power.
   base::MessageLoop loop(base::MessageLoop::TYPE_UI);
-  scoped_ptr<base::PowerMonitorSource> power_monitor_source(
+  std::unique_ptr<base::PowerMonitorSource> power_monitor_source(
       new base::PowerMonitorDeviceSource());
-  scoped_ptr<base::PowerMonitor> power_monitor(
+  std::unique_ptr<base::PowerMonitor> power_monitor(
       new base::PowerMonitor(std::move(power_monitor_source)));
 
   HighResolutionTimerManager manager;
diff --git a/base/timer/timer.cc b/base/timer/timer.cc
index fa6b8cd..e554905 100644
--- a/base/timer/timer.cc
+++ b/base/timer/timer.cc
@@ -9,8 +9,8 @@
 #include "base/logging.h"
 #include "base/memory/ref_counted.h"
 #include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 
 namespace base {
 
diff --git a/base/timer/timer_unittest.cc b/base/timer/timer_unittest.cc
index b1d3c3e..e56efac 100644
--- a/base/timer/timer_unittest.cc
+++ b/base/timer/timer_unittest.cc
@@ -2,13 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/timer/timer.h"
+
 #include <stddef.h>
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/test/test_simple_task_runner.h"
-#include "base/timer/timer.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -78,7 +80,7 @@
   }
 
   bool* did_run_;
-  scoped_ptr<base::OneShotTimer> timer_;
+  std::unique_ptr<base::OneShotTimer> timer_;
 };
 
 class RepeatingTimerTester {
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
index f65b35b..0a04d62 100644
--- a/base/trace_event/common/trace_event_common.h
+++ b/base/trace_event/common/trace_event_common.h
@@ -156,7 +156,7 @@
 //   };
 //
 //   TRACE_EVENT1("foo", "bar", "data",
-//                scoped_ptr<ConvertableToTraceFormat>(new MyData()));
+//                std::unique_ptr<ConvertableToTraceFormat>(new MyData()));
 //
 // The trace framework will take ownership of the passed pointer and it will
 // be freed when the trace buffer is flushed.
@@ -926,6 +926,11 @@
                                    name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
                                    arg1_val, arg2_name, arg2_val)
 
+// Special trace event macro to trace task execution with the location where it
+// was posted from.
+#define TRACE_TASK_EXECUTION(run_function, task) \
+  INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
+
 // TRACE_EVENT_METADATA* events are information related to other
 // injected events, not events in their own right.
 #define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
diff --git a/base/trace_event/heap_profiler.h b/base/trace_event/heap_profiler.h
new file mode 100644
index 0000000..cf57524
--- /dev/null
+++ b/base/trace_event/heap_profiler.h
@@ -0,0 +1,89 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_H
+#define BASE_TRACE_EVENT_HEAP_PROFILER_H
+
+#include "base/compiler_specific.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+
+// This header file defines the set of macros that are used to track memory
+// usage in the heap profiler. These macros are in addition to the ones in
+// trace_event.h and are specific to the heap profiler. This file also defines
+// the implementation details of these macros.
+
+// Implementation detail: heap profiler macros create temporary variables to
+// keep instrumentation overhead low. These macros give each temporary variable
+// a unique name based on the line number to prevent name collisions.
+#define INTERNAL_HEAP_PROFILER_UID3(a, b) heap_profiler_unique_##a##b
+#define INTERNAL_HEAP_PROFILER_UID2(a, b) INTERNAL_HEAP_PROFILER_UID3(a, b)
+#define INTERNAL_HEAP_PROFILER_UID(name_prefix) \
+  INTERNAL_HEAP_PROFILER_UID2(name_prefix, __LINE__)
+
+// Scoped tracker for task execution context in the heap profiler.
+#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
+  trace_event_internal::HeapProfilerScopedTaskExecutionTracker
+
+// A scoped ignore event used to tell the heap profiler to ignore all the
+// allocations in the scope. It is useful for excluding allocations made for
+// tracing from the heap profiler dumps.
+#define HEAP_PROFILER_SCOPED_IGNORE                                          \
+  trace_event_internal::HeapProfilerScopedIgnore INTERNAL_HEAP_PROFILER_UID( \
+      scoped_ignore)
+
+namespace trace_event_internal {
+
+// HeapProfilerScopedTaskExecutionTracker records the current task's context in
+// the heap profiler.
+class HeapProfilerScopedTaskExecutionTracker {
+ public:
+  inline explicit HeapProfilerScopedTaskExecutionTracker(
+      const char* task_context)
+      : context_(task_context) {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(AllocationContextTracker::capture_mode() !=
+                 AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PushCurrentTaskContext(context_);
+    }
+  }
+
+  inline ~HeapProfilerScopedTaskExecutionTracker() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(AllocationContextTracker::capture_mode() !=
+                 AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopCurrentTaskContext(context_);
+    }
+  }
+
+ private:
+  const char* context_;
+};
+
+class BASE_EXPORT HeapProfilerScopedIgnore {
+ public:
+  inline HeapProfilerScopedIgnore() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(
+            AllocationContextTracker::capture_mode() !=
+            AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->begin_ignore_scope();
+    }
+  }
+  inline ~HeapProfilerScopedIgnore() {
+    using base::trace_event::AllocationContextTracker;
+    if (UNLIKELY(
+            AllocationContextTracker::capture_mode() !=
+            AllocationContextTracker::CaptureMode::DISABLED)) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->end_ignore_scope();
+    }
+  }
+};
+
+}  // namespace trace_event_internal
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_H
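
A usage sketch for the two macros above, mirroring how the SequencedWorkerPool
change earlier in this patch scopes task execution; RunPostedTask and
SerializeTraceBuffer are hypothetical helpers.

    #include "base/callback.h"
    #include "base/trace_event/heap_profiler.h"

    // Attributes every allocation made while |task| runs to the file it was
    // posted from; |posted_from_file| must outlive the scope (in practice it is
    // the static string from tracked_objects::Location).
    void RunPostedTask(const char* posted_from_file, const base::Closure& task) {
      TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION task_event(posted_from_file);
      task.Run();
    }

    // Keeps allocations made while serializing the trace out of the heap dumps.
    void SerializeTraceBuffer() {
      HEAP_PROFILER_SCOPED_IGNORE;
      // ... build and emit the serialized trace ...
    }
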
diff --git a/base/trace_event/heap_profiler_allocation_context.cc b/base/trace_event/heap_profiler_allocation_context.cc
index dcef5bd..374d504 100644
--- a/base/trace_event/heap_profiler_allocation_context.cc
+++ b/base/trace_event/heap_profiler_allocation_context.cc
@@ -12,27 +12,27 @@
 namespace base {
 namespace trace_event {
 
-// Constructor that does not initialize members.
-AllocationContext::AllocationContext() {}
-
-// static
-AllocationContext AllocationContext::Empty() {
-  AllocationContext ctx;
-
-  for (size_t i = 0; i < arraysize(ctx.backtrace.frames); i++)
-    ctx.backtrace.frames[i] = nullptr;
-
-  ctx.type_name = nullptr;
-
-  return ctx;
+bool operator < (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value < rhs.value;
 }
 
+bool operator == (const StackFrame& lhs, const StackFrame& rhs) {
+  return lhs.value == rhs.value;
+}
+
+bool operator != (const StackFrame& lhs, const StackFrame& rhs) {
+  return !(lhs.value == rhs.value);
+}
+
+Backtrace::Backtrace(): frame_count(0) {}
+
 bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
-  // Pointer equality of the stack frames is assumed, so instead of doing a deep
-  // string comparison on all of the frames, a |memcmp| suffices.
-  return std::memcmp(lhs.frames, rhs.frames, sizeof(lhs.frames)) == 0;
+  if (lhs.frame_count != rhs.frame_count) return false;
+  return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
 }
 
+AllocationContext::AllocationContext(): type_name(nullptr) {}
+
 bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
   return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
 }
@@ -43,11 +43,20 @@
 namespace BASE_HASH_NAMESPACE {
 using base::trace_event::AllocationContext;
 using base::trace_event::Backtrace;
+using base::trace_event::StackFrame;
+
+size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
+  return hash<const void*>()(frame.value);
+}
 
 size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
-  return base::Hash(
-    std::string(reinterpret_cast<const char*>(backtrace.frames),
-                sizeof(backtrace.frames)));
+  const void* values[Backtrace::kMaxFrameCount];
+  for (size_t i = 0; i != backtrace.frame_count; ++i) {
+    values[i] = backtrace.frames[i].value;
+  }
+  return base::SuperFastHash(
+      reinterpret_cast<const char*>(values),
+      static_cast<int>(backtrace.frame_count * sizeof(*values)));
 }
 
 size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
diff --git a/base/trace_event/heap_profiler_allocation_context.h b/base/trace_event/heap_profiler_allocation_context.h
index 8544c78..3566dd0 100644
--- a/base/trace_event/heap_profiler_allocation_context.h
+++ b/base/trace_event/heap_profiler_allocation_context.h
@@ -29,32 +29,60 @@
 // memory used for tracing and accuracy. Measurements done on a prototype
 // revealed that:
 //
-// - In 60 percent of the cases, stack depth <= 7.
-// - In 87 percent of the cases, stack depth <= 9.
-// - In 95 percent of the cases, stack depth <= 11.
+// - In 60 percent of the cases, pseudo stack depth <= 7.
+// - In 87 percent of the cases, pseudo stack depth <= 9.
+// - In 95 percent of the cases, pseudo stack depth <= 11.
 //
 // See the design doc (https://goo.gl/4s7v7b) for more details.
 
-using StackFrame = const char*;
+// Represents a (pseudo) stack frame. Used in the Backtrace class below.
+//
+// Conceptually, a stack frame is identified by its value; the type is used
+// mostly to format the value correctly. The value is expected to be a valid
+// pointer within the process' address space.
+struct BASE_EXPORT StackFrame {
+  enum class Type {
+    TRACE_EVENT_NAME,   // const char* string
+    THREAD_NAME,        // const char* thread name
+    PROGRAM_COUNTER,    // as returned by stack tracing (e.g. by StackTrace)
+  };
+
+  static StackFrame FromTraceEventName(const char* name) {
+    return {Type::TRACE_EVENT_NAME, name};
+  }
+  static StackFrame FromThreadName(const char* name) {
+    return {Type::THREAD_NAME, name};
+  }
+  static StackFrame FromProgramCounter(const void* pc) {
+    return {Type::PROGRAM_COUNTER, pc};
+  }
+
+  Type type;
+  const void* value;
+};
+
+bool BASE_EXPORT operator < (const StackFrame& lhs, const StackFrame& rhs);
+bool BASE_EXPORT operator == (const StackFrame& lhs, const StackFrame& rhs);
+bool BASE_EXPORT operator != (const StackFrame& lhs, const StackFrame& rhs);
 
 struct BASE_EXPORT Backtrace {
-  // Unused backtrace frames are filled with nullptr frames. If the stack is
-  // higher than what can be stored here, the bottom frames are stored. Based
-  // on the data above, a depth of 12 captures the full stack in the vast
-  // majority of the cases.
-  StackFrame frames[12];
+  Backtrace();
+
+  // If the stack is higher than what can be stored here, the bottom frames
+  // (the ones closer to main()) are stored. Depth of 12 is enough for most
+  // pseudo traces (see above), but not for native traces, where we need more.
+  enum { kMaxFrameCount = 24 };
+  StackFrame frames[kMaxFrameCount];
+  size_t frame_count;
 };
 
 bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
 
 // The |AllocationContext| is context metadata that is kept for every allocation
 // when heap profiling is enabled. To simplify memory management for book-
-// keeping, this struct has a fixed size. All |const char*|s here must have
-// static lifetime.
+// keeping, this struct has a fixed size.
 struct BASE_EXPORT AllocationContext {
- public:
-  // An allocation context with empty backtrace and unknown type.
-  static AllocationContext Empty();
+  AllocationContext();
 
   Backtrace backtrace;
 
@@ -63,26 +91,28 @@
   // deep string comparison. In a component build, where a type name can have a
   // string literal in several dynamic libraries, this may distort grouping.
   const char* type_name;
-
- private:
-  friend class AllocationContextTracker;
-
-  // Don't allow uninitialized instances except inside the allocation context
-  // tracker. Except in tests, an |AllocationContext| should only be obtained
-  // from the tracker. In tests, paying the overhead of initializing the struct
-  // to |Empty| and then overwriting the members is not such a big deal.
-  AllocationContext();
 };
 
 bool BASE_EXPORT operator==(const AllocationContext& lhs,
                             const AllocationContext& rhs);
 
+// Struct to store the size and count of the allocations.
+struct AllocationMetrics {
+  size_t size;
+  size_t count;
+};
+
 }  // namespace trace_event
 }  // namespace base
 
 namespace BASE_HASH_NAMESPACE {
 
 template <>
+struct BASE_EXPORT hash<base::trace_event::StackFrame> {
+  size_t operator()(const base::trace_event::StackFrame& frame) const;
+};
+
+template <>
 struct BASE_EXPORT hash<base::trace_event::Backtrace> {
   size_t operator()(const base::trace_event::Backtrace& backtrace) const;
 };
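
The new StackFrame is a small tagged value built through the factory functions
above, and a Backtrace now carries an explicit frame_count instead of
null-padded slots. A usage sketch, assuming a Chromium checkout rather than a
standalone program; BacktracesMatchExample is an illustrative helper, not part
of the patch:

#include "base/trace_event/heap_profiler_allocation_context.h"

namespace {

using base::trace_event::Backtrace;
using base::trace_event::StackFrame;

bool BacktracesMatchExample() {
  static const char kThread[] = "ExampleThread";
  static const char kEvent[] = "ExampleEvent";

  Backtrace lhs;  // frame_count starts at 0 (see the new constructor).
  lhs.frames[lhs.frame_count++] = StackFrame::FromThreadName(kThread);
  lhs.frames[lhs.frame_count++] = StackFrame::FromTraceEventName(kEvent);

  Backtrace rhs;
  rhs.frames[rhs.frame_count++] = StackFrame::FromThreadName(kThread);
  rhs.frames[rhs.frame_count++] = StackFrame::FromTraceEventName(kEvent);

  // operator== compares frame_count first, then only the populated prefix.
  return lhs == rhs;
}

}  // namespace
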
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.cc b/base/trace_event/heap_profiler_allocation_context_tracker.cc
index 791ab7a..fac4a8a 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -8,16 +8,29 @@
 #include <iterator>
 
 #include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/threading/platform_thread.h"
 #include "base/threading/thread_local_storage.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include <sys/prctl.h>
+#endif
+
 namespace base {
 namespace trace_event {
 
-subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0;
+subtle::Atomic32 AllocationContextTracker::capture_mode_ =
+    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);
 
 namespace {
 
+const size_t kMaxStackDepth = 128u;
+const size_t kMaxTaskDepth = 16u;
+AllocationContextTracker* const kInitializingSentinel =
+    reinterpret_cast<AllocationContextTracker*>(-1);
+const char kTracingOverhead[] = "tracing_overhead";
+
 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
 
 // This function is added to the TLS slot to clean up the instance when the
@@ -26,17 +39,41 @@
   delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
 }
 
+// Cannot call ThreadIdNameManager::GetName because it holds a lock and causes
+// a deadlock when the lock is already held by ThreadIdNameManager before the
+// current allocation. Gets the thread name from the kernel if available, or
+// returns a string with the thread id. This function intentionally leaks the
+// allocated strings, since they tag allocations even after the thread dies.
+const char* GetAndLeakThreadName() {
+  char name[16];
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // If the thread name is not set, try to get it from prctl. Thread name might
+  // not be set in cases where the thread started before heap profiling was
+  // enabled.
+  int err = prctl(PR_GET_NAME, name);
+  if (!err) {
+    return strdup(name);
+  }
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+  // Use tid if we don't have a thread name.
+  snprintf(name, sizeof(name), "%lu",
+           static_cast<unsigned long>(PlatformThread::CurrentId()));
+  return strdup(name);
+}
+
 }  // namespace
 
-AllocationContextTracker::AllocationContextTracker() {}
-AllocationContextTracker::~AllocationContextTracker() {}
-
 // static
-AllocationContextTracker* AllocationContextTracker::GetThreadLocalTracker() {
-  auto tracker =
+AllocationContextTracker*
+AllocationContextTracker::GetInstanceForCurrentThread() {
+  AllocationContextTracker* tracker =
       static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
+  if (tracker == kInitializingSentinel)
+    return nullptr;  // Re-entrancy case.
 
   if (!tracker) {
+    g_tls_alloc_ctx_tracker.Set(kInitializingSentinel);
     tracker = new AllocationContextTracker();
     g_tls_alloc_ctx_tracker.Set(tracker);
   }
@@ -44,69 +81,163 @@
   return tracker;
 }
 
+AllocationContextTracker::AllocationContextTracker()
+    : thread_name_(nullptr), ignore_scope_depth_(0) {
+  pseudo_stack_.reserve(kMaxStackDepth);
+  task_contexts_.reserve(kMaxTaskDepth);
+}
+AllocationContextTracker::~AllocationContextTracker() {}
+
 // static
-void AllocationContextTracker::SetCaptureEnabled(bool enabled) {
+void AllocationContextTracker::SetCurrentThreadName(const char* name) {
+  if (name && capture_mode() != CaptureMode::DISABLED) {
+    GetInstanceForCurrentThread()->thread_name_ = name;
+  }
+}
+
+// static
+void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
   // When enabling capturing, also initialize the TLS slot. This does not create
   // a TLS instance yet.
-  if (enabled && !g_tls_alloc_ctx_tracker.initialized())
+  if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
     g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);
 
-  // Release ordering ensures that when a thread observes |capture_enabled_| to
+  // Release ordering ensures that when a thread observes |capture_mode_| to
   // be true through an acquire load, the TLS slot has been initialized.
-  subtle::Release_Store(&capture_enabled_, enabled);
+  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
 }
 
-// static
-void AllocationContextTracker::PushPseudoStackFrame(StackFrame frame) {
-  auto tracker = AllocationContextTracker::GetThreadLocalTracker();
-
+void AllocationContextTracker::PushPseudoStackFrame(
+    const char* trace_event_name) {
   // Impose a limit on the height to verify that every push is popped, because
   // in practice the pseudo stack never grows higher than ~20 frames.
-  DCHECK_LT(tracker->pseudo_stack_.size(), 128u);
-  tracker->pseudo_stack_.push_back(frame);
+  if (pseudo_stack_.size() < kMaxStackDepth)
+    pseudo_stack_.push_back(trace_event_name);
+  else
+    NOTREACHED();
 }
 
-// static
-void AllocationContextTracker::PopPseudoStackFrame(StackFrame frame) {
-  auto tracker = AllocationContextTracker::GetThreadLocalTracker();
-
+void AllocationContextTracker::PopPseudoStackFrame(
+    const char* trace_event_name) {
   // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
   // scope, the frame was never pushed, so it is possible that pop is called
   // on an empty stack.
-  if (tracker->pseudo_stack_.empty())
+  if (pseudo_stack_.empty())
     return;
 
   // Assert that pushes and pops are nested correctly. This DCHECK can be
   // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
   // without a corresponding TRACE_EVENT_BEGIN).
-  DCHECK_EQ(frame, tracker->pseudo_stack_.back())
+  DCHECK_EQ(trace_event_name, pseudo_stack_.back())
       << "Encountered an unmatched TRACE_EVENT_END";
 
-  tracker->pseudo_stack_.pop_back();
+  pseudo_stack_.pop_back();
+}
+
+void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
+  DCHECK(context);
+  if (task_contexts_.size() < kMaxTaskDepth)
+    task_contexts_.push_back(context);
+  else
+    NOTREACHED();
+}
+
+void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
+  // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
+  // scope, the context was never pushed, so it is possible that pop is called
+  // on an empty stack.
+  if (task_contexts_.empty())
+    return;
+
+  DCHECK_EQ(context, task_contexts_.back())
+      << "Encountered an unmatched context end";
+  task_contexts_.pop_back();
 }
 
 // static
 AllocationContext AllocationContextTracker::GetContextSnapshot() {
-  AllocationContextTracker* tracker = GetThreadLocalTracker();
   AllocationContext ctx;
 
-  // Fill the backtrace.
-  {
-    auto src = tracker->pseudo_stack_.begin();
-    auto dst = std::begin(ctx.backtrace.frames);
-    auto src_end = tracker->pseudo_stack_.end();
-    auto dst_end = std::end(ctx.backtrace.frames);
-
-    // Copy as much of the bottom of the pseudo stack into the backtrace as
-    // possible.
-    for (; src != src_end && dst != dst_end; src++, dst++)
-      *dst = *src;
-
-    // If there is room for more, fill the remaining slots with empty frames.
-    std::fill(dst, dst_end, nullptr);
+  if (ignore_scope_depth_) {
+    ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
+    ctx.type_name = kTracingOverhead;
+    ctx.backtrace.frame_count = 1;
+    return ctx;
   }
 
-  ctx.type_name = nullptr;
+  CaptureMode mode = static_cast<CaptureMode>(
+      subtle::NoBarrier_Load(&capture_mode_));
+
+  auto backtrace = std::begin(ctx.backtrace.frames);
+  auto backtrace_end = std::end(ctx.backtrace.frames);
+
+  if (!thread_name_) {
+    // Ignore the string allocation made by GetAndLeakThreadName to avoid
+    // reentrancy.
+    ignore_scope_depth_++;
+    thread_name_ = GetAndLeakThreadName();
+    ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
+    DCHECK(thread_name_);
+    ignore_scope_depth_--;
+  }
+
+  // Add the thread name as the first entry in pseudo stack.
+  if (thread_name_) {
+    *backtrace++ = StackFrame::FromThreadName(thread_name_);
+  }
+
+  switch (mode) {
+    case CaptureMode::DISABLED:
+      {
+        break;
+      }
+    case CaptureMode::PSEUDO_STACK:
+      {
+        for (const char* event_name: pseudo_stack_) {
+          if (backtrace == backtrace_end) {
+            break;
+          }
+          *backtrace++ = StackFrame::FromTraceEventName(event_name);
+        }
+        break;
+      }
+    case CaptureMode::NATIVE_STACK:
+      {
+        // Backtrace contract requires us to return bottom frames, i.e.
+        // from main() and up. Stack unwinding produces top frames, i.e.
+        // from this point and up until main(). We request many frames to
+        // make sure we reach main(), and then copy the bottom portion.
+        const void* frames[128];
+        static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
+                      "not requesting enough frames to fill Backtrace");
+#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL)
+        size_t frame_count = debug::TraceStackFramePointers(
+            frames,
+            arraysize(frames),
+            1 /* exclude this function from the trace */ );
+#else
+        size_t frame_count = 0;
+        NOTREACHED();
+#endif
+
+        // Copy frames backwards
+        size_t backtrace_capacity = backtrace_end - backtrace;
+        size_t top_frame_index = (backtrace_capacity >= frame_count) ?
+            0 :
+            frame_count - backtrace_capacity;
+        for (size_t i = frame_count; i > top_frame_index;) {
+          const void* frame = frames[--i];
+          *backtrace++ = StackFrame::FromProgramCounter(frame);
+        }
+        break;
+      }
+  }
+
+  ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
+
+  // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
+  // (component name) in the heap profiler and not piggy back on the type name.
+  ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();
 
   return ctx;
 }
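
The NATIVE_STACK branch above has to reconcile two orderings: the unwinder
returns top frames first, while Backtrace stores the frames closest to main()
first and drops the innermost frames when space runs out. A standalone sketch
of that index arithmetic with plain arrays; all names are local to the example:

#include <cassert>
#include <cstddef>

int main() {
  // Unwound order: frames[0] is the innermost frame, frames[5] is main().
  const int frames[] = {0, 1, 2, 3, 4, 5};
  const size_t frame_count = 6;

  int backtrace[4];  // only room for four entries
  const size_t capacity = 4;

  // Same arithmetic as in GetContextSnapshot(): skip the topmost frames that
  // do not fit, then copy the rest in reverse so main() comes first.
  const size_t top_frame_index =
      (capacity >= frame_count) ? 0 : frame_count - capacity;
  size_t out = 0;
  for (size_t i = frame_count; i > top_frame_index;)
    backtrace[out++] = frames[--i];

  assert(out == 4);
  assert(backtrace[0] == 5);  // the end of the stack near main() is kept...
  assert(backtrace[3] == 2);  // ...and the two innermost frames are dropped.
  return 0;
}
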
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker.h b/base/trace_event/heap_profiler_allocation_context_tracker.h
index 9c9a313..454200c 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -9,6 +9,7 @@
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
+#include "base/debug/stack_trace.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
@@ -23,46 +24,82 @@
 // details.
 class BASE_EXPORT AllocationContextTracker {
  public:
-  // Globally enables capturing allocation context.
-  // TODO(ruuda): Should this be replaced by |EnableCapturing| in the future?
-  // Or at least have something that guards agains enable -> disable -> enable?
-  static void SetCaptureEnabled(bool enabled);
+  enum class CaptureMode: int32_t {
+    DISABLED,       // Don't capture anything
+    PSEUDO_STACK,   // GetContextSnapshot() returns pseudo stack trace
+    NATIVE_STACK    // GetContextSnapshot() returns native (real) stack trace
+  };
 
-  // Returns whether capturing allocation context is enabled globally.
-  inline static bool capture_enabled() {
+  // Globally sets capturing mode.
+  // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
+  static void SetCaptureMode(CaptureMode mode);
+
+  // Returns global capturing mode.
+  inline static CaptureMode capture_mode() {
     // A little lag after heap profiling is enabled or disabled is fine, it is
     // more important that the check is as cheap as possible when capturing is
     // not enabled, so do not issue a memory barrier in the fast path.
-    if (subtle::NoBarrier_Load(&capture_enabled_) == 0)
-      return false;
+    if (subtle::NoBarrier_Load(&capture_mode_) ==
+            static_cast<int32_t>(CaptureMode::DISABLED))
+      return CaptureMode::DISABLED;
 
     // In the slow path, an acquire load is required to pair with the release
-    // store in |SetCaptureEnabled|. This is to ensure that the TLS slot for
+    // store in |SetCaptureMode|. This is to ensure that the TLS slot for
     // the thread-local allocation context tracker has been initialized if
-    // |capture_enabled| returns true.
-    return subtle::Acquire_Load(&capture_enabled_) != 0;
+    // |capture_mode| returns something other than DISABLED.
+    return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
+  }
+
+  // Returns the thread-local instance, creating one if necessary. Always
+  // returns a valid instance, unless it is called re-entrantly, in which case
+  // it returns nullptr in the nested calls.
+  static AllocationContextTracker* GetInstanceForCurrentThread();
+
+  // Set the thread name in the AllocationContextTracker of the current thread
+  // if capture is enabled.
+  static void SetCurrentThreadName(const char* name);
+
+  // Starts and ends an ignore scope; allocations made between the two calls
+  // are ignored by the heap profiler. A dummy context that short-circuits to
+  // "tracing_overhead" is returned for these allocations.
+  void begin_ignore_scope() { ignore_scope_depth_++; }
+  void end_ignore_scope() {
+    if (ignore_scope_depth_)
+      ignore_scope_depth_--;
   }
 
   // Pushes a frame onto the thread-local pseudo stack.
-  static void PushPseudoStackFrame(StackFrame frame);
+  void PushPseudoStackFrame(const char* trace_event_name);
 
   // Pops a frame from the thread-local pseudo stack.
-  static void PopPseudoStackFrame(StackFrame frame);
+  void PopPseudoStackFrame(const char* trace_event_name);
+
+  // Push and pop current task's context. A stack is used to support nested
+  // tasks and the top of the stack will be used in allocation context.
+  void PushCurrentTaskContext(const char* context);
+  void PopCurrentTaskContext(const char* context);
 
   // Returns a snapshot of the current thread-local context.
-  static AllocationContext GetContextSnapshot();
+  AllocationContext GetContextSnapshot();
 
   ~AllocationContextTracker();
 
  private:
   AllocationContextTracker();
 
-  static AllocationContextTracker* GetThreadLocalTracker();
-
-  static subtle::Atomic32 capture_enabled_;
+  static subtle::Atomic32 capture_mode_;
 
   // The pseudo stack where frames are |TRACE_EVENT| names.
-  std::vector<StackFrame> pseudo_stack_;
+  std::vector<const char*> pseudo_stack_;
+
+  // The thread name is used as the first entry in the pseudo stack.
+  const char* thread_name_;
+
+  // Stack of tasks' contexts. Context serves as a different dimension than
+  // pseudo stack to cluster allocations.
+  std::vector<const char*> task_contexts_;
+
+  uint32_t ignore_scope_depth_;
 
   DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker);
 };
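
capture_mode() keeps the disabled check cheap with a barrier-free load and only
pays for an acquire load once capturing is on, pairing with the release store
in SetCaptureMode(). A standalone sketch of the same pattern written with
std::atomic purely for illustration; it is not the base::subtle API the patch
uses:

#include <atomic>
#include <cstdint>

enum class Mode : int32_t { DISABLED, PSEUDO_STACK, NATIVE_STACK };

std::atomic<int32_t> g_mode{static_cast<int32_t>(Mode::DISABLED)};

void set_mode(Mode mode) {
  // ... initialize the TLS slot here, before publishing the mode ...
  g_mode.store(static_cast<int32_t>(mode), std::memory_order_release);
}

Mode mode() {
  // Fast path: no barrier while profiling is off.
  if (g_mode.load(std::memory_order_relaxed) ==
      static_cast<int32_t>(Mode::DISABLED))
    return Mode::DISABLED;
  // Slow path: the acquire load pairs with the release store above, so the
  // TLS initialization is visible once a non-DISABLED mode is observed.
  return static_cast<Mode>(g_mode.load(std::memory_order_acquire));
}

int main() {
  set_mode(Mode::PSEUDO_STACK);
  return mode() == Mode::PSEUDO_STACK ? 0 : 1;
}
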
diff --git a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 58255ad..07d5f25 100644
--- a/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -7,6 +7,8 @@
 #include <iterator>
 
 #include "base/memory/ref_counted.h"
+#include "base/pending_task.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/trace_event.h"
@@ -17,6 +19,7 @@
 
 // Define all strings once, because the pseudo stack requires pointer equality,
 // and string interning is unreliable.
+const char kThreadName[] = "TestThread";
 const char kCupcake[] = "Cupcake";
 const char kDonut[] = "Donut";
 const char kEclair[] = "Eclair";
@@ -27,10 +30,12 @@
 // in |AllocationContextTracker::GetContextSnapshot|.
 template <size_t N>
 void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
-  AllocationContext ctx = AllocationContextTracker::GetContextSnapshot();
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
 
   auto actual = std::begin(ctx.backtrace.frames);
-  auto actual_bottom = std::end(ctx.backtrace.frames);
+  auto actual_bottom = actual + ctx.backtrace.frame_count;
   auto expected = std::begin(expected_backtrace);
   auto expected_bottom = std::end(expected_backtrace);
 
@@ -45,11 +50,14 @@
   ASSERT_EQ(expected, expected_bottom);
 }
 
-void AssertBacktraceEmpty() {
-  AllocationContext ctx = AllocationContextTracker::GetContextSnapshot();
+void AssertBacktraceContainsOnlyThreadName() {
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
 
-  for (StackFrame frame : ctx.backtrace.frames)
-    ASSERT_EQ(nullptr, frame);
+  ASSERT_EQ(1u, ctx.backtrace.frame_count);
+  ASSERT_EQ(t, ctx.backtrace.frames[0]);
 }
 
 class AllocationContextTrackerTest : public testing::Test {
@@ -57,34 +65,36 @@
   void SetUp() override {
     TraceConfig config("");
     TraceLog::GetInstance()->SetEnabled(config, TraceLog::RECORDING_MODE);
-    AllocationContextTracker::SetCaptureEnabled(true);
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+    AllocationContextTracker::SetCurrentThreadName(kThreadName);
   }
 
   void TearDown() override {
-    AllocationContextTracker::SetCaptureEnabled(false);
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::DISABLED);
     TraceLog::GetInstance()->SetDisabled();
   }
 };
 
 // Check that |TRACE_EVENT| macros push and pop to the pseudo stack correctly.
-// Also check that |GetContextSnapshot| fills the backtrace with null pointers
-// when the pseudo stack height is less than the capacity.
 TEST_F(AllocationContextTrackerTest, PseudoStackScopedTrace) {
-  StackFrame c = kCupcake;
-  StackFrame d = kDonut;
-  StackFrame e = kEclair;
-  StackFrame f = kFroyo;
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   {
     TRACE_EVENT0("Testing", kCupcake);
-    StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+    StackFrame frame_c[] = {t, c};
     AssertBacktraceEquals(frame_c);
 
     {
       TRACE_EVENT0("Testing", kDonut);
-      StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+      StackFrame frame_cd[] = {t, c, d};
       AssertBacktraceEquals(frame_cd);
     }
 
@@ -92,38 +102,39 @@
 
     {
       TRACE_EVENT0("Testing", kEclair);
-      StackFrame frame_ce[] = {c, e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+      StackFrame frame_ce[] = {t, c, e};
       AssertBacktraceEquals(frame_ce);
     }
 
     AssertBacktraceEquals(frame_c);
   }
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   {
     TRACE_EVENT0("Testing", kFroyo);
-    StackFrame frame_f[] = {f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+    StackFrame frame_f[] = {t, f};
     AssertBacktraceEquals(frame_f);
   }
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 }
 
 // Same as |PseudoStackScopedTrace|, but now test the |TRACE_EVENT_BEGIN| and
 // |TRACE_EVENT_END| macros.
 TEST_F(AllocationContextTrackerTest, PseudoStackBeginEndTrace) {
-  StackFrame c = kCupcake;
-  StackFrame d = kDonut;
-  StackFrame e = kEclair;
-  StackFrame f = kFroyo;
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
 
-  StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_ce[] = {c, e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_f[] = {f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+  StackFrame frame_c[] = {t, c};
+  StackFrame frame_cd[] = {t, c, d};
+  StackFrame frame_ce[] = {t, c, e};
+  StackFrame frame_f[] = {t, f};
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   TRACE_EVENT_BEGIN0("Testing", kCupcake);
   AssertBacktraceEquals(frame_c);
@@ -141,27 +152,28 @@
   AssertBacktraceEquals(frame_c);
   TRACE_EVENT_END0("Testing", kCupcake);
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   TRACE_EVENT_BEGIN0("Testing", kFroyo);
   AssertBacktraceEquals(frame_f);
   TRACE_EVENT_END0("Testing", kFroyo);
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 }
 
 TEST_F(AllocationContextTrackerTest, PseudoStackMixedTrace) {
-  StackFrame c = kCupcake;
-  StackFrame d = kDonut;
-  StackFrame e = kEclair;
-  StackFrame f = kFroyo;
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame d = StackFrame::FromTraceEventName(kDonut);
+  StackFrame e = StackFrame::FromTraceEventName(kEclair);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
 
-  StackFrame frame_c[] = {c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_cd[] = {c, d, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_e[] = {e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-  StackFrame frame_ef[] = {e, f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+  StackFrame frame_c[] = {t, c};
+  StackFrame frame_cd[] = {t, c, d};
+  StackFrame frame_e[] = {t, e};
+  StackFrame frame_ef[] = {t, e, f};
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   TRACE_EVENT_BEGIN0("Testing", kCupcake);
   AssertBacktraceEquals(frame_c);
@@ -173,7 +185,7 @@
 
   AssertBacktraceEquals(frame_c);
   TRACE_EVENT_END0("Testing", kCupcake);
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 
   {
     TRACE_EVENT0("Testing", kEclair);
@@ -185,12 +197,15 @@
     AssertBacktraceEquals(frame_e);
   }
 
-  AssertBacktraceEmpty();
+  AssertBacktraceContainsOnlyThreadName();
 }
 
 TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
-  // Push 12 events onto the pseudo stack.
-  TRACE_EVENT0("Testing", kCupcake);
+  StackFrame t = StackFrame::FromThreadName(kThreadName);
+  StackFrame c = StackFrame::FromTraceEventName(kCupcake);
+  StackFrame f = StackFrame::FromTraceEventName(kFroyo);
+
+  // Push 11 events onto the pseudo stack.
   TRACE_EVENT0("Testing", kCupcake);
   TRACE_EVENT0("Testing", kCupcake);
   TRACE_EVENT0("Testing", kCupcake);
@@ -207,19 +222,64 @@
 
   {
     TRACE_EVENT0("Testing", kGingerbread);
-    AllocationContext ctx = AllocationContextTracker::GetContextSnapshot();
+    AllocationContext ctx =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
 
     // The pseudo stack relies on pointer equality, not deep string comparisons.
-    ASSERT_EQ(kCupcake, ctx.backtrace.frames[0]);
-    ASSERT_EQ(kFroyo, ctx.backtrace.frames[11]);
+    ASSERT_EQ(t, ctx.backtrace.frames[0]);
+    ASSERT_EQ(c, ctx.backtrace.frames[1]);
+    ASSERT_EQ(f, ctx.backtrace.frames[11]);
   }
 
   {
-    AllocationContext ctx = AllocationContextTracker::GetContextSnapshot();
-    ASSERT_EQ(kCupcake, ctx.backtrace.frames[0]);
-    ASSERT_EQ(kFroyo, ctx.backtrace.frames[11]);
+    AllocationContext ctx =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
+    ASSERT_EQ(t, ctx.backtrace.frames[0]);
+    ASSERT_EQ(c, ctx.backtrace.frames[1]);
+    ASSERT_EQ(f, ctx.backtrace.frames[11]);
   }
 }
 
+TEST_F(AllocationContextTrackerTest, TrackTaskContext) {
+  const char kContext1[] = "context1";
+  const char kContext2[] = "context2";
+  {
+    // The context from the scoped task event should be used as type name.
+    TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext1);
+    AllocationContext ctx1 =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
+    ASSERT_EQ(kContext1, ctx1.type_name);
+
+    // In case of nested events, the last event's context should be used.
+    TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event2(kContext2);
+    AllocationContext ctx2 =
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->GetContextSnapshot();
+    ASSERT_EQ(kContext2, ctx2.type_name);
+  }
+
+  // Type should be nullptr without task event.
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
+  ASSERT_FALSE(ctx.type_name);
+}
+
+TEST_F(AllocationContextTrackerTest, IgnoreAllocationTest) {
+  TRACE_EVENT0("Testing", kCupcake);
+  TRACE_EVENT0("Testing", kDonut);
+  HEAP_PROFILER_SCOPED_IGNORE;
+  AllocationContext ctx =
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->GetContextSnapshot();
+  const StringPiece kTracingOverhead("tracing_overhead");
+  ASSERT_EQ(kTracingOverhead,
+            static_cast<const char*>(ctx.backtrace.frames[0].value));
+  ASSERT_EQ(1u, ctx.backtrace.frame_count);
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
new file mode 100644
index 0000000..a0fc4be
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -0,0 +1,199 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_register.h"
+
+#include "base/trace_event/trace_event_memory_overhead.h"
+
+namespace base {
+namespace trace_event {
+
+AllocationRegister::AllocationRegister()
+    : AllocationRegister(kNumBuckets * kNumCellsPerBucket) {}
+
+AllocationRegister::AllocationRegister(uint32_t num_cells)
+    // Reserve enough address space to store |num_cells_| entries if necessary,
+    // with a guard page after it to crash the program when attempting to store
+    // more entries.
+    : num_cells_(num_cells),
+      cells_(static_cast<Cell*>(AllocateVirtualMemory(num_cells_ *
+                                                      sizeof(Cell)))),
+      buckets_(static_cast<CellIndex*>(
+          AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),
+
+      // The free list is empty. The first unused cell is cell 1, because index
+      // 0 is used as the list terminator.
+      free_list_(0),
+      next_unused_cell_(1) {}
+
+AllocationRegister::~AllocationRegister() {
+  FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex));
+  FreeVirtualMemory(cells_, num_cells_ * sizeof(Cell));
+}
+
+void AllocationRegister::Insert(void* address,
+                                size_t size,
+                                AllocationContext context) {
+  DCHECK(address != nullptr);
+  if (size == 0)
+    return;
+
+  CellIndex* idx_ptr = Lookup(address);
+
+  // If the index is 0, the address is not yet present, so insert it.
+  if (*idx_ptr == 0) {
+    *idx_ptr = GetFreeCell();
+
+    // The address stored in a cell is const as long as it is exposed (via the
+    // iterators or |Get|), but because cells are re-used, a const cast is
+    // required to set it on insert and remove.
+    void* const& allocation_address = cells_[*idx_ptr].allocation.address;
+    const_cast<void*&>(allocation_address) = address;
+    cells_[*idx_ptr].next = 0;
+  }
+
+  cells_[*idx_ptr].allocation.size = size;
+  cells_[*idx_ptr].allocation.context = context;
+}
+
+void AllocationRegister::Remove(void* address) {
+  // Get a pointer to the index of the cell that stores |address|. The index can
+  // be an element of |buckets_| or the |next| member of a cell.
+  CellIndex* idx_ptr = Lookup(address);
+  CellIndex freed_idx = *idx_ptr;
+
+  // If the index is 0, the address was not there in the first place.
+  if (freed_idx == 0)
+    return;
+
+  // The cell at the index is now free, remove it from the linked list for
+  // |Hash(address)|.
+  Cell* freed_cell = &cells_[freed_idx];
+  *idx_ptr = freed_cell->next;
+
+  // Put the free cell at the front of the free list.
+  freed_cell->next = free_list_;
+  free_list_ = freed_idx;
+
+  // Reset the address, so that on iteration the free cell is ignored.
+  const_cast<void*&>(freed_cell->allocation.address) = nullptr;
+}
+
+AllocationRegister::Allocation* AllocationRegister::Get(void* address) {
+  CellIndex* idx_ptr = Lookup(address);
+
+  // If the index is 0, the address is not present in the table.
+  return *idx_ptr == 0 ? nullptr : &cells_[*idx_ptr].allocation;
+}
+
+AllocationRegister::ConstIterator AllocationRegister::begin() const {
+  // Initialize the iterator's index to 0. Cell 0 never stores an entry.
+  ConstIterator iterator(*this, 0);
+  // Incrementing will advance the iterator to the first used cell.
+  ++iterator;
+  return iterator;
+}
+
+AllocationRegister::ConstIterator AllocationRegister::end() const {
+  // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry,
+  // so index |next_unused_cell_| is an iterator past the last element, in line
+  // with the STL iterator conventions.
+  return ConstIterator(*this, next_unused_cell_);
+}
+
+AllocationRegister::ConstIterator::ConstIterator(
+    const AllocationRegister& alloc_register,
+    CellIndex index)
+    : register_(alloc_register), index_(index) {}
+
+void AllocationRegister::ConstIterator::operator++() {
+  // Find the next cell with a non-null address until all cells that could
+  // possibly be used have been iterated. A null address indicates a free cell.
+  do {
+    index_++;
+  } while (index_ < register_.next_unused_cell_ &&
+           register_.cells_[index_].allocation.address == nullptr);
+}
+
+bool AllocationRegister::ConstIterator::operator!=(
+    const ConstIterator& other) const {
+  return index_ != other.index_;
+}
+
+const AllocationRegister::Allocation& AllocationRegister::ConstIterator::
+operator*() const {
+  return register_.cells_[index_].allocation;
+}
+
+AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) {
+  // The list head is in |buckets_| at the hash offset.
+  CellIndex* idx_ptr = &buckets_[Hash(address)];
+
+  // Chase down the list until the cell that holds |address| is found,
+  // or until the list ends.
+  while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address)
+    idx_ptr = &cells_[*idx_ptr].next;
+
+  return idx_ptr;
+}
+
+AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
+  // First try to re-use a cell from the freelist.
+  if (free_list_) {
+    CellIndex idx = free_list_;
+    free_list_ = cells_[idx].next;
+    return idx;
+  }
+
+  // Otherwise pick the next cell that has not been touched before.
+  CellIndex idx = next_unused_cell_;
+  next_unused_cell_++;
+
+  // If the hash table has too little capacity (when too little address space
+  // was reserved for |cells_|), |next_unused_cell_| can be an index outside of
+  // the allocated storage. A guard page is allocated there to crash the
+  // program in that case. There are alternative solutions:
+  // - Deal with it, increase capacity by reallocating |cells_|.
+  // - Refuse to insert and let the caller deal with it.
+  // Because free cells are re-used before accessing fresh cells with a higher
+  // index, and because reserving address space without touching it is cheap,
+  // the simplest solution is to just allocate a humongous chunk of address
+  // space.
+
+  DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+
+  return idx;
+}
+
+// static
+uint32_t AllocationRegister::Hash(void* address) {
+  // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
+  // been chosen carefully based on measurements with real-world data (addresses
+  // recorded from a Chrome trace run). It is the first prime after 2^17. For
+  // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
+  // buckets. Microbenchmarks show that this simple scheme outperforms fancy
+  // hashes like Murmur3 by 20 to 40 percent.
+  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
+  const uintptr_t a = 131101;
+  const uintptr_t shift = 14;
+  const uintptr_t h = (key * a) >> shift;
+  return static_cast<uint32_t>(h) & kNumBucketsMask;
+}
+
+void AllocationRegister::EstimateTraceMemoryOverhead(
+    TraceEventMemoryOverhead* overhead) const {
+  // Estimate memory overhead by counting all of the cells that have ever been
+  // touched. Don't report mmapped memory as allocated, because it has not been
+  // allocated by malloc.
+  size_t allocated = sizeof(AllocationRegister);
+  size_t resident = sizeof(AllocationRegister)
+                    // Include size of touched cells (size of |*cells_|).
+                    + sizeof(Cell) * next_unused_cell_
+                    // Size of |*buckets_|.
+                    + sizeof(CellIndex) * kNumBuckets;
+  overhead->Add("AllocationRegister", allocated, resident);
+}
+
+}  // namespace trace_event
+}  // namespace base
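
Hash() above is plain multiplicative hashing: multiply the address by a
constant just above 2^17, shift, and mask the result down to the bucket range.
A standalone check with the same constants; the sample address is arbitrary:

#include <cstdint>

int main() {
  const uint32_t kNumBuckets = 0x20000;  // 2^17 buckets
  const uint32_t kNumBucketsMask = kNumBuckets - 1;

  const uintptr_t a = 131101;  // the first prime after 2^17
  const uintptr_t shift = 14;

  // An arbitrary sample address; any pointer value is handled the same way.
  const uintptr_t key = 0x7ffc1234;
  const uint32_t bucket =
      static_cast<uint32_t>((key * a) >> shift) & kNumBucketsMask;

  // The masked result is always a valid bucket index.
  return bucket < kNumBuckets ? 0 : 1;
}
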
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
new file mode 100644
index 0000000..976f2f5
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -0,0 +1,176 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEventMemoryOverhead;
+
+// The allocation register keeps track of all allocations that have not been
+// freed. It is a memory map-backed hash table that stores size and context
+// indexed by address. The hash table is tailored specifically for this use
+// case. The common case is that an entry is inserted and removed after a
+// while, lookup without modifying the table is not an intended use case. The
+// hash table is implemented as an array of linked lists. The size of this
+// array is fixed, but it does not limit the amount of entries that can be
+// stored.
+//
+// Replaying a recording of Chrome's allocations and frees against this hash
+// table takes about 15% of the time that it takes to replay them against
+// |std::map|.
+class BASE_EXPORT AllocationRegister {
+ public:
+  // The data stored in the hash table;
+  // it contains the details about an allocation.
+  struct Allocation {
+    void* const address;
+    size_t size;
+    AllocationContext context;
+  };
+
+  // An iterator that iterates entries in the hash table efficiently, but in no
+  // particular order. It can do this by iterating the cells and ignoring the
+  // linked lists altogether. Instead of checking whether a cell is in the free
+  // list to see if it should be skipped, a null address is used to indicate
+  // that a cell is free.
+  class BASE_EXPORT ConstIterator {
+   public:
+    void operator++();
+    bool operator!=(const ConstIterator& other) const;
+    const Allocation& operator*() const;
+
+   private:
+    friend class AllocationRegister;
+    using CellIndex = uint32_t;
+
+    ConstIterator(const AllocationRegister& alloc_register, CellIndex index);
+
+    const AllocationRegister& register_;
+    CellIndex index_;
+  };
+
+  AllocationRegister();
+  explicit AllocationRegister(uint32_t num_cells);
+
+  ~AllocationRegister();
+
+  // Inserts allocation details into the table. If the address was present
+  // already, its details are updated. |address| must not be null. (This is
+  // because null is used to mark free cells, to allow efficient iteration of
+  // the hash table.)
+  void Insert(void* address, size_t size, AllocationContext context);
+
+  // Removes the address from the table if it is present. It is ok to call this
+  // with a null pointer.
+  void Remove(void* address);
+
+  // Returns a pointer to the allocation at the address, or null if there is no
+  // allocation at that address. This can be used to change the allocation
+  // context after insertion, for example to change the type name.
+  Allocation* Get(void* address);
+
+  ConstIterator begin() const;
+  ConstIterator end() const;
+
+  // Estimates memory overhead including |sizeof(AllocationRegister)|.
+  void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
+
+ private:
+  friend class AllocationRegisterTest;
+  using CellIndex = uint32_t;
+
+  // A cell can store allocation details (size and context) by address. Cells
+  // are part of a linked list via the |next| member. This list is either the
+  // list for a particular hash, or the free list. All cells are contiguous in
+  // memory in one big array. Therefore, on 64-bit systems, space can be saved
+  // by storing 32-bit indices instead of pointers as links. Index 0 is used as
+  // the list terminator.
+  struct Cell {
+    CellIndex next;
+    Allocation allocation;
+  };
+
+  // The number of buckets, 2^17, approximately 130 000, has been tuned for
+  // Chrome's typical number of outstanding allocations. (This number varies
+  // between processes. Most processes have a sustained load of ~30k unfreed
+  // allocations, but some processes have peaks around 100k-400k allocations.)
+  // Because of the size of the table, it is likely that every |buckets_|
+  // access and every |cells_| access will incur a cache miss. Microbenchmarks
+  // suggest that it is worthwhile to use more memory for the table to avoid
+  // chasing down the linked list, until the size is 2^18. The number of buckets
+  // is a power of two so modular indexing can be done with bitwise and.
+  static const uint32_t kNumBuckets = 0x20000;
+  static const uint32_t kNumBucketsMask = kNumBuckets - 1;
+
+  // Reserve address space to store at most this number of entries. High
+  // capacity does not imply high memory usage due to the access pattern. The
+  // only constraint on the number of cells is that on 32-bit systems address
+  // space is scarce (i.e. reserving 2GiB of address space for the entries is
+  // not an option). A value of ~3M entries is large enough to handle spikes in
+  // the number of allocations, and modest enough to require no more than a few
+  // dozens of MiB of address space.
+  static const uint32_t kNumCellsPerBucket = 10;
+
+  // Returns a value in the range [0, kNumBuckets - 1] (inclusive).
+  static uint32_t Hash(void* address);
+
+  // Allocates a region of virtual address space of |size| rounded up to the
+  // system page size. The memory is zeroed by the system. A guard page is
+  // added after the end.
+  static void* AllocateVirtualMemory(size_t size);
+
+  // Frees a region of virtual address space allocated by a call to
+  // |AllocateVirtualMemory|.
+  static void FreeVirtualMemory(void* address, size_t allocated_size);
+
+  // Returns a pointer to the variable that contains or should contain the
+  // index of the cell that stores the entry for |address|. The pointer may
+  // point at an element of |buckets_| or at the |next| member of an element of
+  // |cells_|. If the value pointed at is 0, |address| is not in the table.
+  CellIndex* Lookup(void* address);
+
+  // Takes a cell that is not being used to store an entry (either by recycling
+  // from the free list or by taking a fresh cell) and returns its index.
+  CellIndex GetFreeCell();
+
+  // The maximum number of cells which can be allocated.
+  uint32_t const num_cells_;
+
+  // The array of cells. This array is backed by mmapped memory. Lower indices
+  // are accessed first, higher indices are only accessed when required. In
+  // this way, even if a huge amount of address space has been mmapped, only
+  // the cells that are actually used will be backed by physical memory.
+  Cell* const cells_;
+
+  // The array of indices into |cells_|. |buckets_[Hash(address)]| will contain
+  // the index of the head of the linked list for |Hash(address)|. A value of 0
+  // indicates an empty list. This array is backed by mmapped memory.
+  CellIndex* const buckets_;
+
+  // The head of the free list. This is the index of the cell. A value of 0
+  // means that the free list is empty.
+  CellIndex free_list_;
+
+  // The index of the first element of |cells_| that has not been used before.
+  // If the free list is empty and a new cell is needed, the cell at this index
+  // is used. This is the high water mark for the number of entries stored.
+  CellIndex next_unused_cell_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
+};
+
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
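
The public surface of AllocationRegister is small: Insert() on allocation,
Remove() on free, Get() to amend a stored context, and unordered iteration for
dumping. A usage sketch, assuming a Chromium checkout rather than a standalone
program; RecordAlloc, RecordFree, LiveBytes, and g_register are illustrative
names, not part of the patch:

#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_allocation_register.h"

namespace {

base::trace_event::AllocationRegister* g_register = nullptr;

void RecordAlloc(void* address, size_t size) {
  using base::trace_event::AllocationContextTracker;
  AllocationContextTracker* tracker =
      AllocationContextTracker::GetInstanceForCurrentThread();
  if (!tracker)  // re-entrant call, see GetInstanceForCurrentThread()
    return;
  g_register->Insert(address, size, tracker->GetContextSnapshot());
}

void RecordFree(void* address) {
  g_register->Remove(address);  // unknown addresses are silently ignored
}

size_t LiveBytes() {
  size_t total = 0;
  for (const auto& allocation : *g_register)  // unordered iteration
    total += allocation.size;
  return total;
}

}  // namespace
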
diff --git a/base/trace_event/heap_profiler_allocation_register_posix.cc b/base/trace_event/heap_profiler_allocation_register_posix.cc
new file mode 100644
index 0000000..c38d7e6
--- /dev/null
+++ b/base/trace_event/heap_profiler_allocation_register_posix.cc
@@ -0,0 +1,59 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_allocation_register.h"
+
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/process/process_metrics.h"
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+size_t GetGuardSize() {
+  return GetPageSize();
+}
+}
+
+// static
+void* AllocationRegister::AllocateVirtualMemory(size_t size) {
+  size = bits::Align(size, GetPageSize());
+
+  // Add space for a guard page at the end.
+  size_t map_size = size + GetGuardSize();
+
+  void* addr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
+                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+  PCHECK(addr != MAP_FAILED);
+
+  // Mark the last page of the allocated address space as inaccessible
+  // (PROT_NONE). The read/write accessible space is still at least |size|
+  // bytes.
+  void* guard_addr =
+      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + size);
+  int result = mprotect(guard_addr, GetGuardSize(), PROT_NONE);
+  PCHECK(result == 0);
+
+  return addr;
+}
+
+// static
+void AllocationRegister::FreeVirtualMemory(void* address,
+                                           size_t allocated_size) {
+  size_t size = bits::Align(allocated_size, GetPageSize()) + GetGuardSize();
+  munmap(address, size);
+}
+
+}  // namespace trace_event
+}  // namespace base
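
AllocateVirtualMemory() rounds the request up to whole pages, reserves one
extra page, and protects it with PROT_NONE so that running off the end of the
cell array faults immediately instead of corrupting memory. A standalone sketch
of that layout using plain POSIX calls; the real code goes through
base::bits::Align and PCHECK:

#include <cassert>
#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t requested = 1000 * 1000;             // ~1 MB of cells
  const size_t usable = (requested + page - 1) & ~(page - 1);
  const size_t map_size = usable + page;            // plus one guard page

  void* addr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(addr != MAP_FAILED);

  // The last page of the mapping becomes the guard page.
  char* guard = static_cast<char*>(addr) + usable;
  const int rc = mprotect(guard, page, PROT_NONE);
  assert(rc == 0);
  (void)rc;

  // ... use [addr, addr + usable) for the cell array ...
  munmap(addr, map_size);
  return 0;
}
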
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
new file mode 100644
index 0000000..1bf06db
--- /dev/null
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -0,0 +1,323 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <iterator>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/memory_dump_session_state.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_log.h"
+
+// Most of what the |HeapDumpWriter| does is aggregating detailed information
+// about the heap and deciding what to dump. The input to this process is a list
+// of |AllocationContext|s and size pairs.
+//
+// The pairs are grouped into |Bucket|s. A bucket is a group of (context, size)
+// pairs where the properties of the contexts share a prefix. (Type name is
+// considered a list of length one here.) First all pairs are put into one
+// bucket that represents the entire heap. Then this bucket is recursively
+// broken down into smaller buckets. Each bucket keeps track of whether further
+// breakdown is possible.
+
+namespace base {
+namespace trace_event {
+namespace internal {
+namespace {
+
+// Denotes a property of |AllocationContext| to break down by.
+enum class BreakDownMode { kByBacktrace, kByTypeName };
+
+// A group of bytes for which the context shares a prefix.
+struct Bucket {
+  Bucket()
+      : size(0),
+        count(0),
+        backtrace_cursor(0),
+        is_broken_down_by_type_name(false) {}
+
+  std::vector<std::pair<const AllocationContext*, AllocationMetrics>>
+      metrics_by_context;
+
+  // The sum of the sizes of |metrics_by_context|.
+  size_t size;
+
+  // The sum of number of allocations of |metrics_by_context|.
+  size_t count;
+
+  // The index of the first stack frame that has not yet been broken down by.
+  // For all elements in this bucket, stack frames 0 up to (but not including)
+  // the cursor must be equal.
+  size_t backtrace_cursor;
+
+  // When true, the type name for all elements in this bucket must be equal.
+  bool is_broken_down_by_type_name;
+};
+
+// Comparison operator to order buckets by their size.
+bool operator<(const Bucket& lhs, const Bucket& rhs) {
+  return lhs.size < rhs.size;
+}
+
+// Groups the allocations in the bucket by |break_by|. The buckets in the
+// returned list will have |backtrace_cursor| advanced or
+// |is_broken_down_by_type_name| set depending on the property to group by.
+std::vector<Bucket> GetSubbuckets(const Bucket& bucket,
+                                  BreakDownMode break_by) {
+  base::hash_map<const void*, Bucket> breakdown;
+
+  if (break_by == BreakDownMode::kByBacktrace) {
+    for (const auto& context_and_metrics : bucket.metrics_by_context) {
+      const Backtrace& backtrace = context_and_metrics.first->backtrace;
+      const StackFrame* begin = std::begin(backtrace.frames);
+      const StackFrame* end = begin + backtrace.frame_count;
+      const StackFrame* cursor = begin + bucket.backtrace_cursor;
+
+      DCHECK_LE(cursor, end);
+
+      if (cursor != end) {
+        Bucket& subbucket = breakdown[cursor->value];
+        subbucket.size += context_and_metrics.second.size;
+        subbucket.count += context_and_metrics.second.count;
+        subbucket.metrics_by_context.push_back(context_and_metrics);
+        subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
+        subbucket.is_broken_down_by_type_name =
+            bucket.is_broken_down_by_type_name;
+        DCHECK_GT(subbucket.size, 0u);
+        DCHECK_GT(subbucket.count, 0u);
+      }
+    }
+  } else if (break_by == BreakDownMode::kByTypeName) {
+    if (!bucket.is_broken_down_by_type_name) {
+      for (const auto& context_and_metrics : bucket.metrics_by_context) {
+        const AllocationContext* context = context_and_metrics.first;
+        Bucket& subbucket = breakdown[context->type_name];
+        subbucket.size += context_and_metrics.second.size;
+        subbucket.count += context_and_metrics.second.count;
+        subbucket.metrics_by_context.push_back(context_and_metrics);
+        subbucket.backtrace_cursor = bucket.backtrace_cursor;
+        subbucket.is_broken_down_by_type_name = true;
+        DCHECK_GT(subbucket.size, 0u);
+        DCHECK_GT(subbucket.count, 0u);
+      }
+    }
+  }
+
+  std::vector<Bucket> buckets;
+  buckets.reserve(breakdown.size());
+  for (auto key_bucket : breakdown)
+    buckets.push_back(key_bucket.second);
+
+  return buckets;
+}
+
+// Breaks down the bucket by |break_by|. Returns only buckets that contribute
+// more than |min_size_bytes| to the total size. The long tail is omitted.
+std::vector<Bucket> BreakDownBy(const Bucket& bucket,
+                                BreakDownMode break_by,
+                                size_t min_size_bytes) {
+  std::vector<Bucket> buckets = GetSubbuckets(bucket, break_by);
+
+  // Ensure that |buckets| is a max-heap (the data structure, not memory heap),
+  // so its front contains the largest bucket. Buckets should be iterated
+  // ordered by size, but sorting the vector is overkill because the long tail
+  // of small buckets will be discarded. By using a max-heap, the optimal case
+  // where all but the first bucket are discarded is O(n). The worst case where
+  // no bucket is discarded is doing a heap sort, which is O(n log n).
+  std::make_heap(buckets.begin(), buckets.end());
+
+  // Keep including buckets until adding one would increase the number of
+  // bytes accounted for by less than |min_size_bytes|. The large buckets end
+  // up in [it, end()); [begin(), it) is the part that contains the max-heap
+  // of small buckets.
+  std::vector<Bucket>::iterator it;
+  for (it = buckets.end(); it != buckets.begin(); --it) {
+    if (buckets.front().size < min_size_bytes)
+      break;
+
+    // Put the largest bucket in [begin, it) at |it - 1| and max-heapify
+    // [begin, it - 1). This puts the next largest bucket at |buckets.front()|.
+    std::pop_heap(buckets.begin(), it);
+  }
+
+  // At this point, |buckets| looks like this (numbers are bucket sizes):
+  //
+  // <-- max-heap of small buckets --->
+  //                                  <-- large buckets by ascending size -->
+  // [ 19 | 11 | 13 | 7 | 2 | 5 | ... | 83 | 89 | 97 ]
+  //   ^                                ^              ^
+  //   |                                |              |
+  //   begin()                          it             end()
+
+  // Discard the long tail of buckets that contribute less than a percent.
+  buckets.erase(buckets.begin(), it);
+
+  return buckets;
+}
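
The loop above avoids a full sort: after make_heap, each pop_heap moves the
current maximum just past the shrinking heap, so the large buckets accumulate
at the back in ascending order and the loop stops as soon as the heap's front
drops below the threshold. A standalone illustration with plain sizes standing
in for buckets:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> sizes = {19, 83, 11, 97, 13, 7, 89, 2, 5};
  const int threshold = 80;

  std::make_heap(sizes.begin(), sizes.end());

  std::vector<int>::iterator it;
  for (it = sizes.end(); it != sizes.begin(); --it) {
    if (sizes.front() < threshold)
      break;
    std::pop_heap(sizes.begin(), it);  // move the current max to it - 1
  }

  sizes.erase(sizes.begin(), it);  // drop the long tail of small sizes
  assert((sizes == std::vector<int>{83, 89, 97}));
  return 0;
}
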
+
+}  // namespace
+
+bool operator<(Entry lhs, Entry rhs) {
+  // There is no need to compare |size|. If the backtrace and type name are
+  // equal then the sizes must be equal as well.
+  return std::tie(lhs.stack_frame_id, lhs.type_id) <
+         std::tie(rhs.stack_frame_id, rhs.type_id);
+}
+
+HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
+                               TypeNameDeduplicator* type_name_deduplicator,
+                               uint32_t breakdown_threshold_bytes)
+    : stack_frame_deduplicator_(stack_frame_deduplicator),
+      type_name_deduplicator_(type_name_deduplicator),
+      breakdown_threshold_bytes_(breakdown_threshold_bytes) {
+}
+
+HeapDumpWriter::~HeapDumpWriter() {}
+
+bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
+  // The contexts in the bucket are all different, but the [begin, cursor) range
+  // is equal for all contexts in the bucket, and the type names are the same if
+  // |is_broken_down_by_type_name| is set.
+  DCHECK(!bucket.metrics_by_context.empty());
+
+  const AllocationContext* context = bucket.metrics_by_context.front().first;
+
+  const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
+  const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
+  DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
+
+  Entry entry;
+  entry.stack_frame_id = stack_frame_deduplicator_->Insert(
+      backtrace_begin, backtrace_end);
+
+  // Deduplicate the type name, or use ID -1 if type name is not set.
+  entry.type_id = bucket.is_broken_down_by_type_name
+                      ? type_name_deduplicator_->Insert(context->type_name)
+                      : -1;
+
+  entry.size = bucket.size;
+  entry.count = bucket.count;
+
+  auto position_and_inserted = entries_.insert(entry);
+  return position_and_inserted.second;
+}
+
+void HeapDumpWriter::BreakDown(const Bucket& bucket) {
+  auto by_backtrace = BreakDownBy(bucket,
+                                  BreakDownMode::kByBacktrace,
+                                  breakdown_threshold_bytes_);
+  auto by_type_name = BreakDownBy(bucket,
+                                  BreakDownMode::kByTypeName,
+                                  breakdown_threshold_bytes_);
+
+  // Insert entries for the buckets. If a bucket was not present before, it has
+  // not been broken down before, so recursively continue breaking down in that
+  // case. There might be multiple routes to the same entry (first break down
+  // by type name, then by backtrace, or first by backtrace and then by type),
+  // so a set is used to avoid dumping and breaking down entries more than once.
+
+  for (const Bucket& subbucket : by_backtrace)
+    if (AddEntryForBucket(subbucket))
+      BreakDown(subbucket);
+
+  for (const Bucket& subbucket : by_type_name)
+    if (AddEntryForBucket(subbucket))
+      BreakDown(subbucket);
+}
+
+const std::set<Entry>& HeapDumpWriter::Summarize(
+    const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context) {
+  // Start with one bucket that represents the entire heap. Iterate by
+  // reference, because the allocation contexts are going to point to allocation
+  // contexts stored in |metrics_by_context|.
+  Bucket root_bucket;
+  for (const auto& context_and_metrics : metrics_by_context) {
+    DCHECK_GT(context_and_metrics.second.size, 0u);
+    DCHECK_GT(context_and_metrics.second.count, 0u);
+    const AllocationContext* context = &context_and_metrics.first;
+    root_bucket.metrics_by_context.push_back(
+        std::make_pair(context, context_and_metrics.second));
+    root_bucket.size += context_and_metrics.second.size;
+    root_bucket.count += context_and_metrics.second.count;
+  }
+
+  AddEntryForBucket(root_bucket);
+
+  // Recursively break down the heap and fill |entries_| with entries to dump.
+  BreakDown(root_bucket);
+
+  return entries_;
+}
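
The "recurse only on first insertion" trick used by BreakDown() above can be hard to see in context; the following stand-alone sketch (with an invented Key type ordered the same way as Entry) shows how std::set::insert().second guarantees that a node reachable via several breakdown routes is expanded at most once:

#include <cstdio>
#include <set>
#include <tuple>

struct Key {
  int backtrace_id;
  int type_id;
};

// Same idea as operator<(Entry, Entry): order by the identifying fields only.
bool operator<(Key lhs, Key rhs) {
  return std::tie(lhs.backtrace_id, lhs.type_id) <
         std::tie(rhs.backtrace_id, rhs.type_id);
}

std::set<Key> g_seen;

void Expand(Key key, int depth) {
  // insert().second is true only the first time |key| is seen, so a key that
  // is reachable along multiple routes is expanded exactly once.
  if (!g_seen.insert(key).second)
    return;
  std::printf("expanding (%d, %d)\n", key.backtrace_id, key.type_id);
  if (depth == 2)
    return;
  // Two different "breakdown routes" can lead to the same child key, e.g.
  // (1, 1) is reachable both via (1, 0) and via (0, 1).
  Expand({key.backtrace_id + 1, key.type_id}, depth + 1);
  Expand({key.backtrace_id, key.type_id + 1}, depth + 1);
}

int main() {
  Expand({0, 0}, 0);
  return 0;
}
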
+
+std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
+  std::string buffer;
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
+
+  traced_value->BeginArray("entries");
+
+  for (const Entry& entry : entries) {
+    traced_value->BeginDictionary();
+
+    // Format size as hexadecimal string into |buffer|.
+    SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size));
+    traced_value->SetString("size", buffer);
+
+    SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.count));
+    traced_value->SetString("count", buffer);
+
+    if (entry.stack_frame_id == -1) {
+      // An empty backtrace (which will have ID -1) is represented by the empty
+      // string, because there is no leaf frame to reference in |stackFrames|.
+      traced_value->SetString("bt", "");
+    } else {
+      // Format index of the leaf frame as a string, because |stackFrames| is a
+      // dictionary, not an array.
+      SStringPrintf(&buffer, "%i", entry.stack_frame_id);
+      traced_value->SetString("bt", buffer);
+    }
+
+    // Type ID -1 (cumulative size for all types) is represented by the absence
+    // of the "type" key in the dictionary.
+    if (entry.type_id != -1) {
+      // Format the type ID as a string.
+      SStringPrintf(&buffer, "%i", entry.type_id);
+      traced_value->SetString("type", buffer);
+    }
+
+    traced_value->EndDictionary();
+  }
+
+  traced_value->EndArray();  // "entries"
+  return traced_value;
+}
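
Illustrative only (all values invented): the traced value produced by Serialize() corresponds roughly to the JSON shape below, where "size" and "count" are hexadecimal strings, "bt" is the decimal index of the leaf frame in |stackFrames| (empty for the whole-heap entry), and "type" is omitted for the cumulative all-types entry:

{"entries": [
  {"size": "1e240", "count": "3e8", "bt": ""},
  {"size": "4b00", "count": "5a", "bt": "7", "type": "2"}
]}
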
+
+}  // namespace internal
+
+std::unique_ptr<TracedValue> ExportHeapDump(
+    const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
+    const MemoryDumpSessionState& session_state) {
+  internal::HeapDumpWriter writer(
+      session_state.stack_frame_deduplicator(),
+      session_state.type_name_deduplicator(),
+      session_state.memory_dump_config().heap_profiler_options
+          .breakdown_threshold_bytes);
+  return Serialize(writer.Summarize(metrics_by_context));
+}
+
+}  // namespace trace_event
+}  // namespace base
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.h b/base/trace_event/heap_profiler_heap_dump_writer.h
new file mode 100644
index 0000000..6e9d29d
--- /dev/null
+++ b/base/trace_event/heap_profiler_heap_dump_writer.h
@@ -0,0 +1,113 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+
+#include "base/base_export.h"
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+
+namespace base {
+namespace trace_event {
+
+class MemoryDumpSessionState;
+class StackFrameDeduplicator;
+class TracedValue;
+class TypeNameDeduplicator;
+
+// Aggregates |metrics_by_context|, recursively breaks down the heap, and
+// returns a traced value with an "entries" array that can be dumped in the
+// trace log, following the format described in https://goo.gl/KY7zVE. The
+// number of entries is kept reasonable because long tails are not included.
+BASE_EXPORT std::unique_ptr<TracedValue> ExportHeapDump(
+    const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
+    const MemoryDumpSessionState& session_state);
+
+namespace internal {
+
+namespace {
+struct Bucket;
+}
+
+// An entry in the "entries" array as described in https://goo.gl/KY7zVE.
+struct BASE_EXPORT Entry {
+  size_t size;
+  size_t count;
+
+  // References a backtrace in the stack frame deduplicator. -1 means empty
+  // backtrace (the root of the tree).
+  int stack_frame_id;
+
+  // References a type name in the type name deduplicator. -1 indicates that
+  // the size is the cumulative size for all types (the root of the tree).
+  int type_id;
+};
+
+// Comparison operator to enable putting |Entry| in a |std::set|.
+BASE_EXPORT bool operator<(Entry lhs, Entry rhs);
+
+// Serializes entries to an "entries" array in a traced value.
+BASE_EXPORT std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& dump);
+
+// Helper class to dump a snapshot of an |AllocationRegister| or other heap
+// bookkeeping structure into a |TracedValue|. This class is intended to be
+// used as a one-shot local instance on the stack.
+class BASE_EXPORT HeapDumpWriter {
+ public:
+  // The |stack_frame_deduplicator| and |type_name_deduplicator| are not owned.
+  // The heap dump writer assumes exclusive access to them during the lifetime
+  // of the dump writer. The heap dumps are broken down for allocations bigger
+  // than |breakdown_threshold_bytes|.
+  HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
+                 TypeNameDeduplicator* type_name_deduplicator,
+                 uint32_t breakdown_threshold_bytes);
+
+  ~HeapDumpWriter();
+
+  // Aggregates allocations to compute the total size of the heap, then breaks
+  // down the heap recursively. This produces the values that should be dumped
+  // in the "entries" array. The number of entries is kept reasonable because
+  // long tails are not included. Use |Serialize| to convert to a traced value.
+  const std::set<Entry>& Summarize(
+      const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context);
+
+ private:
+  // Inserts an |Entry| for |Bucket| into |entries_|. Returns false if the
+  // entry was present before, true if it was not.
+  bool AddEntryForBucket(const Bucket& bucket);
+
+  // Recursively breaks down a bucket into smaller buckets and adds entries for
+  // the buckets worth dumping to |entries_|.
+  void BreakDown(const Bucket& bucket);
+
+  // The collection of entries that is filled by |Summarize|.
+  std::set<Entry> entries_;
+
+  // Helper for generating the |stackFrames| dictionary. Not owned, must outlive
+  // this heap dump writer instance.
+  StackFrameDeduplicator* const stack_frame_deduplicator_;
+
+  // Helper for converting type names to IDs. Not owned, must outlive this heap
+  // dump writer instance.
+  TypeNameDeduplicator* const type_name_deduplicator_;
+
+  // Minimum size of an allocation for which an allocation bucket will be
+  // broken down with children.
+  uint32_t breakdown_threshold_bytes_;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapDumpWriter);
+};
+
+}  // namespace internal
+}  // namespace trace_event
+}  // namespace base
+
+#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_HEAP_DUMP_WRITER_H_
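
A minimal usage sketch of the API declared above, essentially mirroring what ExportHeapDump() does in the .cc file, except that the deduplicators and the threshold are passed in directly instead of coming from MemoryDumpSessionState; the function name DumpForExample and the 2 KiB threshold are invented for the sketch:

#include "base/trace_event/heap_profiler_heap_dump_writer.h"

#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/trace_event_argument.h"

namespace base {
namespace trace_event {

std::unique_ptr<TracedValue> DumpForExample(
    const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
    StackFrameDeduplicator* stack_frames,
    TypeNameDeduplicator* type_names) {
  // Aggregate and recursively break down the heap. Buckets contributing less
  // than the (made-up) 2 KiB threshold are not broken down further.
  internal::HeapDumpWriter writer(stack_frames, type_names,
                                  2048 /* breakdown_threshold_bytes */);
  const std::set<internal::Entry>& entries =
      writer.Summarize(metrics_by_context);
  // Convert the selected entries into the "entries" array of a traced value.
  return internal::Serialize(entries);
}

}  // namespace trace_event
}  // namespace base
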
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
index 9568525..49a2350 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -4,6 +4,7 @@
 
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 
+#include <inttypes.h>
 #include <stddef.h>
 
 #include <string>
@@ -31,7 +32,7 @@
   std::map<StackFrame, int>* nodes = &roots_;
 
   // Loop through the frames, early out when a frame is null.
-  for (const StackFrame* it = beginFrame; it != endFrame && *it; it++) {
+  for (const StackFrame* it = beginFrame; it != endFrame; it++) {
     StackFrame frame = *it;
 
     auto node = nodes->find(frame);
@@ -77,8 +78,26 @@
     SStringPrintf(&stringify_buffer, "\"%d\":", i);
     out->append(stringify_buffer);
 
-    scoped_ptr<TracedValue> frame_node_value(new TracedValue);
-    frame_node_value->SetString("name", frame_node->frame);
+    std::unique_ptr<TracedValue> frame_node_value(new TracedValue);
+    const StackFrame& frame = frame_node->frame;
+    switch (frame.type) {
+      case StackFrame::Type::TRACE_EVENT_NAME:
+        frame_node_value->SetString(
+            "name", static_cast<const char*>(frame.value));
+        break;
+      case StackFrame::Type::THREAD_NAME:
+        SStringPrintf(&stringify_buffer,
+                      "[Thread: %s]",
+                      static_cast<const char*>(frame.value));
+        frame_node_value->SetString("name", stringify_buffer);
+        break;
+      case StackFrame::Type::PROGRAM_COUNTER:
+        SStringPrintf(&stringify_buffer,
+                      "pc:%" PRIxPTR,
+                      reinterpret_cast<uintptr_t>(frame.value));
+        frame_node_value->SetString("name", stringify_buffer);
+        break;
+    }
     if (frame_node->parent_frame_index >= 0) {
       SStringPrintf(&stringify_buffer, "%d", frame_node->parent_frame_index);
       frame_node_value->SetString("parent", stringify_buffer);
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
index 2464036..2215ede 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
@@ -2,12 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
+
 #include <iterator>
+#include <memory>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
-#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -15,11 +16,11 @@
 
 // Define all strings once, because the deduplicator requires pointer equality,
 // and string interning is unreliable.
-const char kBrowserMain[] = "BrowserMain";
-const char kRendererMain[] = "RendererMain";
-const char kCreateWidget[] = "CreateWidget";
-const char kInitialize[] = "Initialize";
-const char kMalloc[] = "malloc";
+StackFrame kBrowserMain = StackFrame::FromTraceEventName("BrowserMain");
+StackFrame kRendererMain = StackFrame::FromTraceEventName("RendererMain");
+StackFrame kCreateWidget = StackFrame::FromTraceEventName("CreateWidget");
+StackFrame kInitialize = StackFrame::FromTraceEventName("Initialize");
+StackFrame kMalloc = StackFrame::FromTraceEventName("malloc");
 
 TEST(StackFrameDeduplicatorTest, SingleBacktrace) {
   StackFrame bt[] = {kBrowserMain, kCreateWidget, kMalloc};
@@ -30,7 +31,7 @@
   //   CreateWidget [1]
   //     malloc [2]
 
-  scoped_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
 
   auto iter = dedup->begin();
@@ -46,6 +47,35 @@
   ASSERT_EQ(iter + 3, dedup->end());
 }
 
+TEST(StackFrameDeduplicatorTest, SingleBacktraceWithNull) {
+  StackFrame null_frame = StackFrame::FromTraceEventName(nullptr);
+  StackFrame bt[] = {kBrowserMain, null_frame, kMalloc};
+
+  // The deduplicator doesn't care what is inside a StackFrame and handles
+  // nullptr values like any other.
+  //
+  // So the call tree should look like this (index in brackets).
+  //
+  // BrowserMain [0]
+  //   (null) [1]
+  //     malloc [2]
+
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
+
+  auto iter = dedup->begin();
+  ASSERT_EQ(kBrowserMain, (iter + 0)->frame);
+  ASSERT_EQ(-1, (iter + 0)->parent_frame_index);
+
+  ASSERT_EQ(null_frame, (iter + 1)->frame);
+  ASSERT_EQ(0, (iter + 1)->parent_frame_index);
+
+  ASSERT_EQ(kMalloc, (iter + 2)->frame);
+  ASSERT_EQ(1, (iter + 2)->parent_frame_index);
+
+  ASSERT_EQ(iter + 3, dedup->end());
+}
+
 // Test that there can be different call trees (there can be multiple bottom
 // frames). Also verify that frames with the same name but a different caller
 // are represented as distinct nodes.
@@ -63,7 +93,7 @@
   // Note that there will be two instances of CreateWidget,
   // with different parents.
 
-  scoped_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
   ASSERT_EQ(3, dedup->Insert(std::begin(bt1), std::end(bt1)));
 
@@ -95,7 +125,7 @@
   //
   // Note that BrowserMain will be re-used.
 
-  scoped_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
+  std::unique_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
   ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
 
@@ -118,17 +148,5 @@
   ASSERT_EQ(dedup->begin() + 3, dedup->end());
 }
 
-TEST(StackFrameDeduplicatorTest, NullPaddingIsRemoved) {
-  StackFrame bt0[] = {kBrowserMain, nullptr, nullptr, nullptr};
-
-  scoped_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
-
-  // There are four frames in the backtrace, but the null pointers should be
-  // skipped, so only one frame is inserted, which will have index 0.
-  ASSERT_EQ(4u, arraysize(bt0));
-  ASSERT_EQ(0, dedup->Insert(std::begin(bt0), std::end(bt0)));
-  ASSERT_EQ(dedup->begin() + 1, dedup->end());
-}
-
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.cc b/base/trace_event/heap_profiler_type_name_deduplicator.cc
index e7f57c8..055f86a 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator.cc
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -16,6 +16,33 @@
 namespace base {
 namespace trace_event {
 
+namespace {
+
+// Extracts the directory name if |type_name| is a file name. Otherwise,
+// returns |type_name| unchanged.
+StringPiece ExtractDirNameFromFileName(const char* type_name) {
+  StringPiece result(type_name);
+  size_t last_separator = result.find_last_of("\\/");
+
+  // If |type_name| is not a file path, the separator will not be found, so
+  // the whole type name is returned.
+  if (last_separator == StringPiece::npos)
+    return result;
+
+  // Remove the file name from the path.
+  result.remove_suffix(result.length() - last_separator);
+
+  // Remove the parent directory references.
+  const char kParentDirectory[] = "..";
+  const size_t kParentDirectoryLength = 3; // '../' or '..\'.
+  while (result.starts_with(kParentDirectory)) {
+    result.remove_prefix(kParentDirectoryLength);
+  }
+  return result;
+}
+
+}  // namespace
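
As a concrete example of the helper above (mirroring the unit test added further down): "../../base/trace_event/trace_log.cc" first loses the trailing file name, then the leading "../" components, yielding "base/trace_event"; a plain type name such as "int" contains no path separator and is returned unchanged.
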
+
 TypeNameDeduplicator::TypeNameDeduplicator() {
   // A null pointer has type ID 0 ("unknown type");
   type_ids_.insert(std::make_pair(nullptr, 0));
@@ -53,9 +80,13 @@
     // a dictionary.
     SStringPrintf(&buffer, ",\"%d\":", it->second);
 
+    // TODO(ssid): crbug.com/594803 the type name is misused as a file name
+    // in some cases.
+    StringPiece type_info = ExtractDirNameFromFileName(it->first);
+
     // |EscapeJSONString| appends, it does not overwrite |buffer|.
     bool put_in_quotes = true;
-    EscapeJSONString(it->first, put_in_quotes, &buffer);
+    EscapeJSONString(type_info, put_in_quotes, &buffer);
     out->append(buffer);
   }
 
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc b/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
index 92ffcf8..b2e681a 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
+++ b/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
@@ -2,10 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <memory>
 #include <string>
 
 #include "base/json/json_reader.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -13,6 +13,8 @@
 namespace base {
 namespace trace_event {
 
+namespace {
+
 // Define all strings once, because the deduplicator requires pointer equality,
 // and string interning is unreliable.
 const char kInt[] = "int";
@@ -20,12 +22,44 @@
 const char kString[] = "string";
 const char kNeedsEscape[] = "\"quotes\"";
 
-scoped_ptr<Value> DumpAndReadBack(const TypeNameDeduplicator& deduplicator) {
+#if defined(OS_POSIX)
+const char kTaskFileName[] = "../../base/trace_event/trace_log.cc";
+const char kTaskPath[] = "base/trace_event";
+#else
+const char kTaskFileName[] = "..\\..\\base\\memory\\memory_win.cc";
+const char kTaskPath[] = "base\\memory";
+#endif
+
+std::unique_ptr<Value> DumpAndReadBack(
+    const TypeNameDeduplicator& deduplicator) {
   std::string json;
   deduplicator.AppendAsTraceFormat(&json);
   return JSONReader::Read(json);
 }
 
+// Inserts a single type name into a new TypeNameDeduplicator instance and
+// checks that it gets ID 1 and that the exported value for |type_name| equals
+// |expected_value|.
+void TestInsertTypeAndReadback(const char* type_name,
+                               const char* expected_value) {
+  std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
+  ASSERT_EQ(1, dedup->Insert(type_name));
+
+  std::unique_ptr<Value> type_names = DumpAndReadBack(*dedup);
+  ASSERT_NE(nullptr, type_names);
+
+  const DictionaryValue* dictionary;
+  ASSERT_TRUE(type_names->GetAsDictionary(&dictionary));
+
+  // When the type name was inserted, it got ID 1. The exported key "1"
+  // should be equal to |expected_value|.
+  std::string value;
+  ASSERT_TRUE(dictionary->GetString("1", &value));
+  ASSERT_EQ(expected_value, value);
+}
+
+}  // namespace
+
 TEST(TypeNameDeduplicatorTest, Deduplication) {
   // The type IDs should be like this:
   // 0: [unknown]
@@ -33,7 +67,7 @@
   // 2: bool
   // 3: string
 
-  scoped_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
+  std::unique_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(kInt));
   ASSERT_EQ(2, dedup->Insert(kBool));
   ASSERT_EQ(3, dedup->Insert(kString));
@@ -48,22 +82,14 @@
 }
 
 TEST(TypeNameDeduplicatorTest, EscapeTypeName) {
-  scoped_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
-  ASSERT_EQ(1, dedup->Insert(kNeedsEscape));
-
   // Reading json should not fail, because the type name should have been
-  // escaped properly.
-  scoped_ptr<Value> type_names = DumpAndReadBack(*dedup);
-  ASSERT_NE(nullptr, type_names);
+  // escaped properly and the exported value should contain the quotes.
+  TestInsertTypeAndReadback(kNeedsEscape, kNeedsEscape);
+}
 
-  const DictionaryValue* dictionary;
-  ASSERT_TRUE(type_names->GetAsDictionary(&dictionary));
-
-  // When the type name was inserted, it got ID 1. The exported key "1"
-  // should contain the name, with quotes.
-  std::string type_name;
-  ASSERT_TRUE(dictionary->GetString("1", &type_name));
-  ASSERT_EQ("\"quotes\"", type_name);
+TEST(TypeNameDeduplicatorTest, TestExtractFileName) {
+  // The exported value for a file name should be its directory path.
+  TestInsertTypeAndReadback(kTaskFileName, kTaskPath);
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 229a8c1..c2b6f79 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -7,7 +7,14 @@
 #include <stddef.h>
 
 #include "base/allocator/allocator_extension.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/features.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_allocation_register.h"
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
 #include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
 
 #if defined(OS_MACOSX)
@@ -19,6 +26,65 @@
 namespace base {
 namespace trace_event {
 
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+namespace {
+
+using allocator::AllocatorDispatch;
+
+void* HookAlloc(const AllocatorDispatch* self, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_function(next, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_zero_initialized_function(next, n, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
+  return ptr;
+}
+
+void* HookAllocAligned(const AllocatorDispatch* self,
+                       size_t alignment,
+                       size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_aligned_function(next, alignment, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->realloc_function(next, address, size);
+  MallocDumpProvider::GetInstance()->RemoveAllocation(address);
+  if (size > 0)  // realloc(size == 0) means free().
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void HookFree(const AllocatorDispatch* self, void* address) {
+  if (address)
+    MallocDumpProvider::GetInstance()->RemoveAllocation(address);
+  const AllocatorDispatch* const next = self->next;
+  next->free_function(next, address);
+}
+
+AllocatorDispatch g_allocator_hooks = {
+    &HookAlloc,         /* alloc_function */
+    &HookZeroInitAlloc, /* alloc_zero_initialized_function */
+    &HookAllocAligned,  /* alloc_aligned_function */
+    &HookRealloc,       /* realloc_function */
+    &HookFree,          /* free_function */
+    nullptr,            /* next */
+};
+
+}  // namespace
+#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
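
The hooks above all follow the same "do the bookkeeping, then forward to the next dispatch" pattern. The following stand-alone sketch models that chain with an invented Dispatch struct (this is not the real base::allocator API, only the shape of it):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Invented, simplified stand-in for AllocatorDispatch: only alloc and free.
struct Dispatch {
  void* (*alloc_function)(const Dispatch* self, std::size_t size);
  void (*free_function)(const Dispatch* self, void* ptr);
  const Dispatch* next;
};

// Terminal dispatch: goes straight to the system allocator.
void* SystemAlloc(const Dispatch* /*self*/, std::size_t size) {
  return std::malloc(size);
}
void SystemFree(const Dispatch* /*self*/, void* ptr) {
  std::free(ptr);
}
const Dispatch g_system_dispatch = {&SystemAlloc, &SystemFree, nullptr};

// Profiling dispatch: records the operation, then forwards to |next|, the
// same shape as HookAlloc/HookFree above.
void* ProfiledAlloc(const Dispatch* self, std::size_t size) {
  const Dispatch* const next = self->next;
  void* ptr = next->alloc_function(next, size);
  if (ptr)
    std::printf("insert allocation %p (%zu bytes)\n", ptr, size);
  return ptr;
}
void ProfiledFree(const Dispatch* self, void* ptr) {
  if (ptr)
    std::printf("remove allocation %p\n", ptr);
  const Dispatch* const next = self->next;
  next->free_function(next, ptr);
}
const Dispatch g_profiling_dispatch = {&ProfiledAlloc, &ProfiledFree,
                                       &g_system_dispatch};

int main() {
  void* p = g_profiling_dispatch.alloc_function(&g_profiling_dispatch, 64);
  g_profiling_dispatch.free_function(&g_profiling_dispatch, p);
  return 0;
}
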
+
 // static
 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
 
@@ -28,13 +94,14 @@
                    LeakySingletonTraits<MallocDumpProvider>>::get();
 }
 
-MallocDumpProvider::MallocDumpProvider() {}
+MallocDumpProvider::MallocDumpProvider()
+    : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {}
 
 MallocDumpProvider::~MallocDumpProvider() {}
 
 // Called at trace dump point time. Creates a snapshot the memory counters for
 // the current process.
-bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& /* args */,
+bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                                       ProcessMemoryDump* pmd) {
   size_t total_virtual_size = 0;
   size_t resident_size = 0;
@@ -97,8 +164,95 @@
                           resident_size - allocated_objects_size);
   }
 
+  // Heap profiler dumps.
+  if (!heap_profiler_enabled_)
+    return true;
+
+  // The dumps of the heap profiler should be created only when heap profiling
+  // is enabled (--enable-heap-profiling) AND a DETAILED dump is requested.
+  // However, when enabled, the overhead of the heap profiler should always be
+  // reported to avoid oscillations of the malloc total in LIGHT dumps.
+
+  tid_dumping_heap_ = PlatformThread::CurrentId();
+  // At this point the Insert/RemoveAllocation hooks will ignore this thread.
+  // Enclose all the temporary data structures in a scope, so that the heap
+  // profiler does not see unbalanced malloc/free calls from these containers.
+  {
+    TraceEventMemoryOverhead overhead;
+    hash_map<AllocationContext, AllocationMetrics> metrics_by_context;
+    {
+      AutoLock lock(allocation_register_lock_);
+      if (allocation_register_) {
+        if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+          for (const auto& alloc_size : *allocation_register_) {
+            AllocationMetrics& metrics = metrics_by_context[alloc_size.context];
+            metrics.size += alloc_size.size;
+            metrics.count++;
+          }
+        }
+        allocation_register_->EstimateTraceMemoryOverhead(&overhead);
+      }
+    }  // lock(allocation_register_lock_)
+    pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
+  }
+  tid_dumping_heap_ = kInvalidThreadId;
+
   return true;
 }
 
+void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+  if (enabled) {
+    {
+      AutoLock lock(allocation_register_lock_);
+      allocation_register_.reset(new AllocationRegister());
+    }
+    allocator::InsertAllocatorDispatch(&g_allocator_hooks);
+  } else {
+    AutoLock lock(allocation_register_lock_);
+    allocation_register_.reset();
+    // Insert/RemoveAllocation below will no-op if the register is torn down.
+    // Once disabled, heap profiling will not be re-enabled for the lifetime
+    // of the process.
+  }
+#endif
+  heap_profiler_enabled_ = enabled;
+}
+
+void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
+  // CurrentId() can be a slow operation (crbug.com/497226). This apparently
+  // redundant condition short-circuits the CurrentId() call when unnecessary.
+  if (tid_dumping_heap_ != kInvalidThreadId &&
+      tid_dumping_heap_ == PlatformThread::CurrentId())
+    return;
+
+  // AllocationContextTracker will return nullptr when called re-entrantly.
+  // This happens when GetInstanceForCurrentThread() is called for the first
+  // time, which triggers a new() inside the tracker that re-enters the heap
+  // profiler; in that case we just want to bail out early.
+  auto tracker = AllocationContextTracker::GetInstanceForCurrentThread();
+  if (!tracker)
+    return;
+  AllocationContext context = tracker->GetContextSnapshot();
+
+  AutoLock lock(allocation_register_lock_);
+  if (!allocation_register_)
+    return;
+
+  allocation_register_->Insert(address, size, context);
+}
+
+void MallocDumpProvider::RemoveAllocation(void* address) {
+  // No re-entrancy is expected here, as none of the calls below should
+  // cause any free() (|allocation_register_| does its own heap management).
+  if (tid_dumping_heap_ != kInvalidThreadId &&
+      tid_dumping_heap_ == PlatformThread::CurrentId())
+    return;
+  AutoLock lock(allocation_register_lock_);
+  if (!allocation_register_)
+    return;
+  allocation_register_->Remove(address);
+}
+
 }  // namespace trace_event
 }  // namespace base
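
The |tid_dumping_heap_| check above is a classic self-exclusion guard: while the dumping thread is building the dump, its own allocations must not feed back into the profile. A minimal stand-alone model of the pattern (std::thread::id instead of PlatformThreadId; all names invented):

#include <cstddef>
#include <cstdio>
#include <thread>

// A default-constructed std::thread::id means "no thread is dumping".
static std::thread::id g_tid_dumping_heap;

void OnAllocationHook(std::size_t size) {
  // Ignore allocations made by the dumping thread itself, so that the dump's
  // own bookkeeping never shows up in the heap profile.
  if (g_tid_dumping_heap != std::thread::id() &&
      g_tid_dumping_heap == std::this_thread::get_id())
    return;
  std::printf("recorded allocation of %zu bytes\n", size);
}

void DumpHeap() {
  g_tid_dumping_heap = std::this_thread::get_id();
  // ... build the dump; any allocation on this thread is ignored ...
  OnAllocationHook(128);  // Ignored.
  g_tid_dumping_heap = std::thread::id();
}

int main() {
  OnAllocationHook(32);  // Recorded.
  DumpHeap();
  OnAllocationHook(64);  // Recorded again once the dump is finished.
  return 0;
}
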
diff --git a/base/trace_event/malloc_dump_provider.h b/base/trace_event/malloc_dump_provider.h
index 63fc1b0..4746cf5 100644
--- a/base/trace_event/malloc_dump_provider.h
+++ b/base/trace_event/malloc_dump_provider.h
@@ -6,9 +6,12 @@
 #define BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
 
 #include <istream>
+#include <memory>
 
 #include "base/macros.h"
 #include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "build/build_config.h"
 
@@ -20,6 +23,8 @@
 namespace base {
 namespace trace_event {
 
+class AllocationRegister;
+
 // Dump provider which collects process-wide memory stats.
 class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
  public:
@@ -33,12 +38,28 @@
   bool OnMemoryDump(const MemoryDumpArgs& args,
                     ProcessMemoryDump* pmd) override;
 
+  void OnHeapProfilingEnabled(bool enabled) override;
+
+  // For heap profiling.
+  void InsertAllocation(void* address, size_t size);
+  void RemoveAllocation(void* address);
+
  private:
   friend struct DefaultSingletonTraits<MallocDumpProvider>;
 
   MallocDumpProvider();
   ~MallocDumpProvider() override;
 
+  // For heap profiling.
+  bool heap_profiler_enabled_;
+  std::unique_ptr<AllocationRegister> allocation_register_;
+  Lock allocation_register_lock_;
+
+  // While in OnMemoryDump(), this contains the ID of the dumping thread.
+  // This is to prevent re-entrancy in the heap profiler when the heap dump
+  // generation is malloc/new-ing for its own bookkeeping data structures.
+  PlatformThreadId tid_dumping_heap_;
+
   DISALLOW_COPY_AND_ASSIGN(MallocDumpProvider);
 };
 
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index 9f91de9..7d10236 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -7,12 +7,12 @@
 
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/trace_event/memory_allocator_dump_guid.h"
 #include "base/values.h"
 
@@ -93,7 +93,7 @@
  private:
   const std::string absolute_name_;
   ProcessMemoryDump* const process_memory_dump_;  // Not owned (PMD owns this).
-  scoped_ptr<TracedValue> attributes_;
+  std::unique_ptr<TracedValue> attributes_;
   MemoryAllocatorDumpGuid guid_;
   int flags_;  // See enum Flags.
 
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
index 649991b..359f081 100644
--- a/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -52,11 +52,12 @@
   }
 };
 
-scoped_ptr<Value> CheckAttribute(const MemoryAllocatorDump* dump,
-                                 const std::string& name,
-                                 const char* expected_type,
-                                 const char* expected_units) {
-  scoped_ptr<Value> raw_attrs = dump->attributes_for_testing()->ToBaseValue();
+std::unique_ptr<Value> CheckAttribute(const MemoryAllocatorDump* dump,
+                                      const std::string& name,
+                                      const char* expected_type,
+                                      const char* expected_units) {
+  std::unique_ptr<Value> raw_attrs =
+      dump->attributes_for_testing()->ToBaseValue();
   DictionaryValue* args = nullptr;
   DictionaryValue* arg = nullptr;
   std::string arg_value;
@@ -68,7 +69,7 @@
   EXPECT_TRUE(arg->GetString("units", &arg_value));
   EXPECT_EQ(expected_units, arg_value);
   EXPECT_TRUE(arg->Get("value", &out_value));
-  return out_value ? out_value->CreateDeepCopy() : scoped_ptr<Value>();
+  return out_value ? out_value->CreateDeepCopy() : std::unique_ptr<Value>();
 }
 
 void CheckString(const MemoryAllocatorDump* dump,
@@ -104,7 +105,7 @@
 }  // namespace
 
 TEST(MemoryAllocatorDumpTest, GuidGeneration) {
-  scoped_ptr<MemoryAllocatorDump> mad(
+  std::unique_ptr<MemoryAllocatorDump> mad(
       new MemoryAllocatorDump("foo", nullptr, MemoryAllocatorDumpGuid(0x42u)));
   ASSERT_EQ("42", mad->guid().ToString());
 
@@ -167,7 +168,7 @@
   ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameObjectCount));
 
   // Check that the AsValueInfo doesn't hit any DCHECK.
-  scoped_ptr<TracedValue> traced_value(new TracedValue);
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
   pmd.AsValueInto(traced_value.get());
 }
 
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index 4ba7fcb..b14d265 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -11,8 +11,12 @@
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/compiler_specific.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/debug/debugging_flags.h"
+#include "base/debug/stack_trace.h"
+#include "base/memory/ptr_util.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
@@ -81,7 +85,7 @@
 // Proxy class which wraps a ConvertableToTraceFormat owned by the
 // |session_state| into a proxy object that can be added to the trace event log.
 // This is to solve the problem that the MemoryDumpSessionState is refcounted
-// but the tracing subsystem wants a scoped_ptr<ConvertableToTraceFormat>.
+// but the tracing subsystem wants a std::unique_ptr<ConvertableToTraceFormat>.
 template <typename T>
 struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
   using GetterFunctPtr = T* (MemoryDumpSessionState::*)() const;
@@ -168,7 +172,29 @@
           switches::kEnableHeapProfiling))
     return;
 
-  AllocationContextTracker::SetCaptureEnabled(true);
+  std::string profiling_mode = CommandLine::ForCurrentProcess()
+      ->GetSwitchValueASCII(switches::kEnableHeapProfiling);
+  if (profiling_mode == "") {
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK);
+  } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) {
+#if HAVE_TRACE_STACK_FRAME_POINTERS && \
+    (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
+    // We need frame pointers for native tracing to work, and they are
+    // enabled in profiling and debug builds.
+    AllocationContextTracker::SetCaptureMode(
+        AllocationContextTracker::CaptureMode::NATIVE_STACK);
+#else
+    CHECK(false) << "'" << profiling_mode << "' mode for "
+                 << switches::kEnableHeapProfiling << " flag is not supported "
+                 << "for this platform / build type.";
+#endif
+  } else {
+    CHECK(false) << "Invalid mode '" << profiling_mode << "' for "
+                 << switches::kEnableHeapProfiling << " flag.";
+  }
+
   for (auto mdp : dump_providers_)
     mdp->dump_provider->OnHeapProfilingEnabled(true);
   heap_profiling_enabled_ = true;
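
Assuming the switch constants keep their usual Chromium string values, this means that a bare --enable-heap-profiling selects pseudo-stack capture, while --enable-heap-profiling=native selects native stack capture and is honored only in builds that keep frame pointers (profiling or debug builds); any other value aborts on the CHECK above.
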
@@ -211,43 +237,43 @@
 void MemoryDumpManager::RegisterDumpProvider(
     MemoryDumpProvider* mdp,
     const char* name,
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+    scoped_refptr<SingleThreadTaskRunner> task_runner,
     MemoryDumpProvider::Options options) {
   options.dumps_on_single_thread_task_runner = true;
-  RegisterDumpProviderInternal(mdp, name, task_runner, options);
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
 }
 
 void MemoryDumpManager::RegisterDumpProvider(
     MemoryDumpProvider* mdp,
     const char* name,
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
+    scoped_refptr<SingleThreadTaskRunner> task_runner) {
   // Set |dumps_on_single_thread_task_runner| to true because all providers
   // without task runner are run on dump thread.
   MemoryDumpProvider::Options options;
   options.dumps_on_single_thread_task_runner = true;
-  RegisterDumpProviderInternal(mdp, name, task_runner, options);
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
 }
 
 void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
     MemoryDumpProvider* mdp,
     const char* name,
-    const scoped_refptr<SequencedTaskRunner>& task_runner,
+    scoped_refptr<SequencedTaskRunner> task_runner,
     MemoryDumpProvider::Options options) {
   DCHECK(task_runner);
   options.dumps_on_single_thread_task_runner = false;
-  RegisterDumpProviderInternal(mdp, name, task_runner, options);
+  RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
 }
 
 void MemoryDumpManager::RegisterDumpProviderInternal(
     MemoryDumpProvider* mdp,
     const char* name,
-    const scoped_refptr<SequencedTaskRunner>& task_runner,
+    scoped_refptr<SequencedTaskRunner> task_runner,
     const MemoryDumpProvider::Options& options) {
   if (dumper_registrations_ignored_for_testing_)
     return;
 
   scoped_refptr<MemoryDumpProviderInfo> mdpinfo =
-      new MemoryDumpProviderInfo(mdp, name, task_runner, options);
+      new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options);
 
   {
     AutoLock lock(lock_);
@@ -267,14 +293,14 @@
 }
 
 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
-    scoped_ptr<MemoryDumpProvider> mdp) {
+    std::unique_ptr<MemoryDumpProvider> mdp) {
   UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
 }
 
 void MemoryDumpManager::UnregisterDumpProviderInternal(
     MemoryDumpProvider* mdp,
     bool take_mdp_ownership_and_delete_async) {
-  scoped_ptr<MemoryDumpProvider> owned_mdp;
+  std::unique_ptr<MemoryDumpProvider> owned_mdp;
   if (take_mdp_ownership_and_delete_async)
     owned_mdp.reset(mdp);
 
@@ -370,7 +396,7 @@
   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
                                     TRACE_ID_MANGLE(args.dump_guid));
 
-  scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
+  std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
   {
     AutoLock lock(lock_);
     // |dump_thread_| can be nullptr is tracing was disabled before reaching
@@ -400,14 +426,21 @@
 // |lock_| is used in these functions purely to ensure consistency w.r.t.
 // (un)registrations of |dump_providers_|.
 void MemoryDumpManager::SetupNextMemoryDump(
-    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+  HEAP_PROFILER_SCOPED_IGNORE;
   // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
   // in the PostTask below don't end up registering their own dump providers
   // (for discounting trace memory overhead) while holding the |lock_|.
   TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
 
-  // If this was the last hop, create a trace event, add it to the trace and
-  // finalize process dump (invoke callback).
+  // |dump_thread_| might have been destroyed before reaching this point;
+  // that means tracing was disabled right before this dump started. Either
+  // way, if tracing is stopped or this was the last hop, create a trace
+  // event, add it to the trace and finalize the dump by invoking the callback.
+  if (!pmd_async_state->dump_thread_task_runner.get()) {
+    pmd_async_state->dump_successful = false;
+    pmd_async_state->pending_dump_providers.clear();
+  }
   if (pmd_async_state->pending_dump_providers.empty())
     return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
 
@@ -417,20 +450,12 @@
       pmd_async_state->pending_dump_providers.back().get();
 
   // If the dump provider did not specify a task runner affinity, dump on
-  // |dump_thread_|. Note that |dump_thread_| might have been destroyed
-  // meanwhile.
+  // |dump_thread_|, whose presence was already checked above.
   SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
   if (!task_runner) {
     DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
     task_runner = pmd_async_state->dump_thread_task_runner.get();
-    if (!task_runner) {
-      // If tracing was disabled before reaching CreateProcessDump() the
-      // dump_thread_ would have been already torn down. Nack current dump and
-      // continue.
-      pmd_async_state->dump_successful = false;
-      pmd_async_state->pending_dump_providers.pop_back();
-      return SetupNextMemoryDump(std::move(pmd_async_state));
-    }
+    DCHECK(task_runner);
   }
 
   if (mdpinfo->options.dumps_on_single_thread_task_runner &&
@@ -475,6 +500,7 @@
 // (unless disabled).
 void MemoryDumpManager::InvokeOnMemoryDump(
     ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+  HEAP_PROFILER_SCOPED_IGNORE;
   // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
   // why it isn't is because of the corner case logic of |did_post_task|
   // above, which needs to take back the ownership of the |pmd_async_state| when
@@ -482,7 +508,7 @@
   // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
   // to prevent accidental leaks. Using a scoped_ptr would prevent us to to
   // skip the hop and move on. Hence the manual naked -> scoped ptr juggling.
-  auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
+  auto pmd_async_state = WrapUnique(owned_pmd_async_state);
   owned_pmd_async_state = nullptr;
 
   // Read MemoryDumpProviderInfo thread safety considerations in
@@ -535,7 +561,8 @@
 
 // static
 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
-    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+    std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+  HEAP_PROFILER_SCOPED_IGNORE;
   DCHECK(pmd_async_state->pending_dump_providers.empty());
   const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
   if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
@@ -554,7 +581,7 @@
   for (const auto& kv : pmd_async_state->process_dumps) {
     ProcessId pid = kv.first;  // kNullProcessId for the current process.
     ProcessMemoryDump* process_memory_dump = kv.second.get();
-    scoped_ptr<TracedValue> traced_value(new TracedValue);
+    std::unique_ptr<TracedValue> traced_value(new TracedValue);
     process_memory_dump->AsValueInto(traced_value.get());
     traced_value->SetString("level_of_detail",
                             MemoryDumpLevelOfDetailToString(
@@ -562,7 +589,8 @@
     const char* const event_name =
         MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);
 
-    scoped_ptr<ConvertableToTraceFormat> event_value(std::move(traced_value));
+    std::unique_ptr<ConvertableToTraceFormat> event_value(
+        std::move(traced_value));
     TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
         TRACE_EVENT_PHASE_MEMORY_DUMP,
         TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
@@ -598,7 +626,7 @@
   TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
 
   // Spin-up the thread used to invoke unbound dump providers.
-  scoped_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
+  std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
   if (!dump_thread->Start()) {
     LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
     return;
@@ -614,15 +642,15 @@
     // deduplicator will be in use. Add a metadata events to write the frames
     // and type IDs.
     session_state_->SetStackFrameDeduplicator(
-        make_scoped_ptr(new StackFrameDeduplicator));
+        WrapUnique(new StackFrameDeduplicator));
 
     session_state_->SetTypeNameDeduplicator(
-        make_scoped_ptr(new TypeNameDeduplicator));
+        WrapUnique(new TypeNameDeduplicator));
 
     TRACE_EVENT_API_ADD_METADATA_EVENT(
         TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
         "stackFrames",
-        make_scoped_ptr(
+        WrapUnique(
             new SessionStateConvertableProxy<StackFrameDeduplicator>(
                 session_state_,
                 &MemoryDumpSessionState::stack_frame_deduplicator)));
@@ -630,7 +658,7 @@
     TRACE_EVENT_API_ADD_METADATA_EVENT(
         TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
         "typeNames",
-        make_scoped_ptr(new SessionStateConvertableProxy<TypeNameDeduplicator>(
+        WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
             session_state_, &MemoryDumpSessionState::type_name_deduplicator)));
   }
 
@@ -654,15 +682,16 @@
   g_periodic_dumps_count = 0;
   const TraceConfig trace_config =
       TraceLog::GetInstance()->GetCurrentTraceConfig();
-  const TraceConfig::MemoryDumpConfig& config_list =
-      trace_config.memory_dump_config();
-  if (config_list.empty())
+  session_state_->SetMemoryDumpConfig(trace_config.memory_dump_config());
+  const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list =
+      trace_config.memory_dump_config().triggers;
+  if (triggers_list.empty())
     return;
 
   uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
   uint32_t heavy_dump_period_ms = 0;
-  DCHECK_LE(config_list.size(), 2u);
-  for (const TraceConfig::MemoryDumpTriggerConfig& config : config_list) {
+  DCHECK_LE(triggers_list.size(), 2u);
+  for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
     DCHECK(config.periodic_interval_ms);
     if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
       heavy_dump_period_ms = config.periodic_interval_ms;
@@ -682,7 +711,7 @@
   // ensure that the MDM state which depends on the tracing enabled / disabled
   // state is always accessed by the dumping methods holding the |lock_|.
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
-  scoped_ptr<Thread> dump_thread;
+  std::unique_ptr<Thread> dump_thread;
   {
     AutoLock lock(lock_);
     dump_thread = std::move(dump_thread_);
@@ -703,11 +732,11 @@
 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
     MemoryDumpProvider* dump_provider,
     const char* name,
-    const scoped_refptr<SequencedTaskRunner>& task_runner,
+    scoped_refptr<SequencedTaskRunner> task_runner,
     const MemoryDumpProvider::Options& options)
     : dump_provider(dump_provider),
       name(name),
-      task_runner(task_runner),
+      task_runner(std::move(task_runner)),
       options(options),
       consecutive_failures(0),
       disabled(false) {}
@@ -729,15 +758,15 @@
 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
     MemoryDumpRequestArgs req_args,
     const MemoryDumpProviderInfo::OrderedSet& dump_providers,
-    const scoped_refptr<MemoryDumpSessionState>& session_state,
+    scoped_refptr<MemoryDumpSessionState> session_state,
     MemoryDumpCallback callback,
-    const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner)
+    scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner)
     : req_args(req_args),
-      session_state(session_state),
+      session_state(std::move(session_state)),
       callback(callback),
       dump_successful(true),
       callback_task_runner(MessageLoop::current()->task_runner()),
-      dump_thread_task_runner(dump_thread_task_runner) {
+      dump_thread_task_runner(std::move(dump_thread_task_runner)) {
   pending_dump_providers.reserve(dump_providers.size());
   pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
 }
@@ -749,7 +778,8 @@
     GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) {
   auto iter = process_dumps.find(pid);
   if (iter == process_dumps.end()) {
-    scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state));
+    std::unique_ptr<ProcessMemoryDump> new_pmd(
+        new ProcessMemoryDump(session_state));
     iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
   }
   return iter->second.get();
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index e9b09f8..817768a 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -73,19 +73,17 @@
   //      the calls to |mdp| will be run on the given |task_runner|. If passed
   //      null |mdp| should be able to handle calls on arbitrary threads.
   //  - options: extra optional arguments. See memory_dump_provider.h.
-  void RegisterDumpProvider(
-      MemoryDumpProvider* mdp,
-      const char* name,
-      const scoped_refptr<SingleThreadTaskRunner>& task_runner);
-  void RegisterDumpProvider(
-      MemoryDumpProvider* mdp,
-      const char* name,
-      const scoped_refptr<SingleThreadTaskRunner>& task_runner,
-      MemoryDumpProvider::Options options);
+  void RegisterDumpProvider(MemoryDumpProvider* mdp,
+                            const char* name,
+                            scoped_refptr<SingleThreadTaskRunner> task_runner);
+  void RegisterDumpProvider(MemoryDumpProvider* mdp,
+                            const char* name,
+                            scoped_refptr<SingleThreadTaskRunner> task_runner,
+                            MemoryDumpProvider::Options options);
   void RegisterDumpProviderWithSequencedTaskRunner(
       MemoryDumpProvider* mdp,
       const char* name,
-      const scoped_refptr<SequencedTaskRunner>& task_runner,
+      scoped_refptr<SequencedTaskRunner> task_runner,
       MemoryDumpProvider::Options options);
   void UnregisterDumpProvider(MemoryDumpProvider* mdp);
 
@@ -96,7 +94,8 @@
   //  - The |mdp| will be deleted at some point in the near future.
   //  - Its deletion will not happen concurrently with the OnMemoryDump() call.
   // Note that OnMemoryDump() calls can still happen after this method returns.
-  void UnregisterAndDeleteDumpProviderSoon(scoped_ptr<MemoryDumpProvider> mdp);
+  void UnregisterAndDeleteDumpProviderSoon(
+      std::unique_ptr<MemoryDumpProvider> mdp);
 
   // Requests a memory dump. The dump might happen or not depending on the
   // filters and categories specified when enabling tracing.
@@ -174,17 +173,16 @@
     using OrderedSet =
         std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
 
-    MemoryDumpProviderInfo(
-        MemoryDumpProvider* dump_provider,
-        const char* name,
-        const scoped_refptr<SequencedTaskRunner>& task_runner,
-        const MemoryDumpProvider::Options& options);
+    MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
+                           const char* name,
+                           scoped_refptr<SequencedTaskRunner> task_runner,
+                           const MemoryDumpProvider::Options& options);
 
     MemoryDumpProvider* const dump_provider;
 
     // Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
     // nullptr in all other cases.
-    scoped_ptr<MemoryDumpProvider> owned_dump_provider;
+    std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
 
     // Human readable name, for debugging and testing. Not necessarily unique.
     const char* const name;
@@ -217,9 +215,9 @@
     ProcessMemoryDumpAsyncState(
         MemoryDumpRequestArgs req_args,
         const MemoryDumpProviderInfo::OrderedSet& dump_providers,
-        const scoped_refptr<MemoryDumpSessionState>& session_state,
+        scoped_refptr<MemoryDumpSessionState> session_state,
         MemoryDumpCallback callback,
-        const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner);
+        scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner);
     ~ProcessMemoryDumpAsyncState();
 
     // Gets or creates the memory dump container for the given target process.
@@ -229,7 +227,7 @@
     // being dumped from the current process. Typically each process dumps only
     // for itself, unless dump providers specify a different |target_process| in
     // MemoryDumpProvider::Options.
-    std::map<ProcessId, scoped_ptr<ProcessMemoryDump>> process_dumps;
+    std::map<ProcessId, std::unique_ptr<ProcessMemoryDump>> process_dumps;
 
     // The arguments passed to the initial CreateProcessDump() request.
     const MemoryDumpRequestArgs req_args;
@@ -272,7 +270,7 @@
 
   static void SetInstanceForTesting(MemoryDumpManager* instance);
   static void FinalizeDumpAndAddToTrace(
-      scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+      std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
 
   // Enable heap profiling if kEnableHeapProfiling is specified.
   void EnableHeapProfilingIfNeeded();
@@ -288,7 +286,7 @@
   // the MDP while registration. On failure to do so, skips and continues to
   // next MDP.
   void SetupNextMemoryDump(
-      scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+      std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
 
   // Invokes OnMemoryDump() of the next MDP and calls SetupNextMemoryDump() at
   // the end to continue the ProcessMemoryDump. Should be called on the MDP task
@@ -299,7 +297,7 @@
   void RegisterDumpProviderInternal(
       MemoryDumpProvider* mdp,
       const char* name,
-      const scoped_refptr<SequencedTaskRunner>& task_runner,
+      scoped_refptr<SequencedTaskRunner> task_runner,
       const MemoryDumpProvider::Options& options);
 
   // Helper for the public UnregisterDumpProvider* functions.
@@ -331,7 +329,7 @@
 
   // Thread used for MemoryDumpProviders which don't specify a task runner
   // affinity.
-  scoped_ptr<Thread> dump_thread_;
+  std::unique_ptr<Thread> dump_thread_;
 
   // The unique id of the child process. This is created only for tracing and is
   // expected to be valid only when tracing is enabled.
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 138ba69..c1295ef 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -6,21 +6,22 @@
 
 #include <stdint.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/bind_helpers.h"
+#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted_memory.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/run_loop.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/waitable_event.h"
 #include "base/test/test_io_thread.h"
 #include "base/test/trace_event_analyzer.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/sequenced_worker_pool.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_buffer.h"
@@ -51,42 +52,29 @@
 
 void RegisterDumpProvider(
     MemoryDumpProvider* mdp,
-    const scoped_refptr<base::SequencedTaskRunner>& task_runner,
-    const MemoryDumpProvider::Options& options,
-    bool dumps_on_single_thread_task_runner) {
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+    const MemoryDumpProvider::Options& options) {
   MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
   mdm->set_dumper_registrations_ignored_for_testing(false);
   const char* kMDPName = "TestDumpProvider";
-  if (dumps_on_single_thread_task_runner) {
-    scoped_refptr<base::SingleThreadTaskRunner> single_thread_task_runner =
-        static_cast<base::SingleThreadTaskRunner*>(task_runner.get());
-    mdm->RegisterDumpProvider(mdp, kMDPName,
-                              std::move(single_thread_task_runner), options);
-  } else {
-    mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
-                                                     options);
-  }
+  mdm->RegisterDumpProvider(mdp, kMDPName, std::move(task_runner), options);
   mdm->set_dumper_registrations_ignored_for_testing(true);
 }
 
-void RegisterDumpProvider(
-    MemoryDumpProvider* mdp,
-    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
-    const MemoryDumpProvider::Options& options) {
-  RegisterDumpProvider(mdp, task_runner, options,
-                       true /* dumps_on_single_thread_task_runner */);
-}
-
 void RegisterDumpProvider(MemoryDumpProvider* mdp) {
   RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
 }
 
 void RegisterDumpProviderWithSequencedTaskRunner(
     MemoryDumpProvider* mdp,
-    const scoped_refptr<base::SequencedTaskRunner>& task_runner,
+    scoped_refptr<base::SequencedTaskRunner> task_runner,
     const MemoryDumpProvider::Options& options) {
-  RegisterDumpProvider(mdp, task_runner, options,
-                       false /* dumps_on_single_thread_task_runner */);
+  MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
+  mdm->set_dumper_registrations_ignored_for_testing(false);
+  const char* kMDPName = "TestDumpProvider";
+  mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
+                                                   options);
+  mdm->set_dumper_registrations_ignored_for_testing(true);
 }
 
 void OnTraceDataCollected(Closure quit_closure,
@@ -129,7 +117,17 @@
   MOCK_METHOD2(OnMemoryDump,
                bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
 
-  MockMemoryDumpProvider() : enable_mock_destructor(false) {}
+  MockMemoryDumpProvider() : enable_mock_destructor(false) {
+    ON_CALL(*this, OnMemoryDump(_, _))
+        .WillByDefault(Invoke([](const MemoryDumpArgs&,
+                                 ProcessMemoryDump* pmd) -> bool {
+          // |session_state| should not be null under any circumstances when
+          // invoking a memory dump. The problem might arise in race conditions
+          // like crbug.com/600570 .
+          EXPECT_TRUE(pmd->session_state().get() != nullptr);
+          return true;
+        }));
+  }
   ~MockMemoryDumpProvider() override {
     if (enable_mock_destructor)
       Destructor();
@@ -246,12 +244,12 @@
   }
 
   const MemoryDumpProvider::Options kDefaultOptions;
-  scoped_ptr<MemoryDumpManager> mdm_;
-  scoped_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
+  std::unique_ptr<MemoryDumpManager> mdm_;
+  std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
   bool last_callback_success_;
 
  private:
-  scoped_ptr<MessageLoop> message_loop_;
+  std::unique_ptr<MessageLoop> message_loop_;
 
   // We want our singleton torn down after each test.
   ShadowingAtExitManager at_exit_manager_;
@@ -458,18 +456,18 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
   const uint32_t kNumInitialThreads = 8;
 
-  std::vector<scoped_ptr<Thread>> threads;
-  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;
+  std::vector<std::unique_ptr<Thread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
 
   // Create the threads and setup the expectations. Given that at each iteration
   // we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be
   // invoked a number of times equal to its index.
   for (uint32_t i = kNumInitialThreads; i > 0; --i) {
-    threads.push_back(make_scoped_ptr(new Thread("test thread")));
+    threads.push_back(WrapUnique(new Thread("test thread")));
     auto thread = threads.back().get();
     thread->Start();
     scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner();
-    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
     auto mdp = mdps.back().get();
     RegisterDumpProvider(mdp, task_runner, kDefaultOptions);
     EXPECT_CALL(*mdp, OnMemoryDump(_, _))
@@ -663,13 +661,13 @@
 // dumping from a different thread than the dumping thread.
 TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
-  std::vector<scoped_ptr<TestIOThread>> threads;
-  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;
+  std::vector<std::unique_ptr<TestIOThread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
 
   for (int i = 0; i < 2; i++) {
     threads.push_back(
-        make_scoped_ptr(new TestIOThread(TestIOThread::kAutoStart)));
-    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
+        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
     RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                          kDefaultOptions);
   }
@@ -678,7 +676,7 @@
 
   // When OnMemoryDump is called on either of the dump providers, it will
   // unregister the other one.
-  for (const scoped_ptr<MockMemoryDumpProvider>& mdp : mdps) {
+  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
     int other_idx = (mdps.front() == mdp);
     TestIOThread* other_thread = threads[other_idx].get();
     MockMemoryDumpProvider* other_mdp = mdps[other_idx].get();
@@ -713,13 +711,13 @@
 // its dump provider should be skipped but the dump itself should succeed.
 TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
-  std::vector<scoped_ptr<TestIOThread>> threads;
-  std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps;
+  std::vector<std::unique_ptr<TestIOThread>> threads;
+  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;
 
   for (int i = 0; i < 2; i++) {
     threads.push_back(
-        make_scoped_ptr(new TestIOThread(TestIOThread::kAutoStart)));
-    mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider()));
+        WrapUnique(new TestIOThread(TestIOThread::kAutoStart)));
+    mdps.push_back(WrapUnique(new MockMemoryDumpProvider()));
     RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                          kDefaultOptions);
   }
@@ -728,7 +726,7 @@
 
   // When OnMemoryDump is called on either of the dump providers, it will
   // tear down the thread of the other one.
-  for (const scoped_ptr<MockMemoryDumpProvider>& mdp : mdps) {
+  for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) {
     int other_idx = (mdps.front() == mdp);
     TestIOThread* other_thread = threads[other_idx].get();
     auto on_dump = [other_thread, &on_memory_dump_call_count](
@@ -901,7 +899,7 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
 
   // Register a bound dump provider.
-  scoped_ptr<Thread> mdp_thread(new Thread("test thread"));
+  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
   mdp_thread->Start();
   MockMemoryDumpProvider mdp_with_affinity;
   RegisterDumpProvider(&mdp_with_affinity, mdp_thread->task_runner(),
@@ -950,8 +948,14 @@
   base::WaitableEvent tracing_disabled_event(false, false);
   InitializeMemoryDumpManager(false /* is_coordinator */);
 
-  MockMemoryDumpProvider mdp;
-  RegisterDumpProvider(&mdp);
+  std::unique_ptr<Thread> mdp_thread(new Thread("test thread"));
+  mdp_thread->Start();
+
+  // Create both a same-thread MDP and another MDP with a dedicated thread.
+  MockMemoryDumpProvider mdp1;
+  RegisterDumpProvider(&mdp1);
+  MockMemoryDumpProvider mdp2;
+  RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
 
   EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
@@ -961,6 +965,11 @@
         delegate_->CreateProcessDump(args, callback);
       }));
 
+  // If tracing is disabled for the current session, CreateProcessDump() should
+  // NOT request dumps from providers. Real-world regression: crbug.com/600570.
+  EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(mdp2, OnMemoryDump(_, _)).Times(0);
+
   last_callback_success_ = true;
   RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
                            MemoryDumpLevelOfDetail::DETAILED);
@@ -1008,7 +1017,7 @@
   buffer.Finish();
 
   // Analyze the JSON.
-  scoped_ptr<trace_analyzer::TraceAnalyzer> analyzer = make_scoped_ptr(
+  std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer = WrapUnique(
       trace_analyzer::TraceAnalyzer::Create(trace_output.json_output));
   trace_analyzer::TraceEventVector events;
   analyzer->FindEvents(Query::EventPhaseIs(TRACE_EVENT_PHASE_MEMORY_DUMP),
@@ -1029,9 +1038,9 @@
   InitializeMemoryDumpManager(false /* is_coordinator */);
   static const int kNumProviders = 3;
   int dtor_count = 0;
-  std::vector<scoped_ptr<MemoryDumpProvider>> mdps;
+  std::vector<std::unique_ptr<MemoryDumpProvider>> mdps;
   for (int i = 0; i < kNumProviders; ++i) {
-    scoped_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
+    std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
     mdp->enable_mock_destructor = true;
     EXPECT_CALL(*mdp, Destructor())
         .WillOnce(Invoke([&dtor_count]() { dtor_count++; }));
@@ -1054,7 +1063,7 @@
 // happen on the same thread (the MemoryDumpManager utility thread).
 TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) {
   InitializeMemoryDumpManager(false /* is_coordinator */);
-  scoped_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
+  std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider);
   mdp->enable_mock_destructor = true;
   RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions);
 
@@ -1068,7 +1077,7 @@
         base::Bind(
             &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon,
             base::Unretained(MemoryDumpManager::GetInstance()),
-            base::Passed(scoped_ptr<MemoryDumpProvider>(std::move(mdp)))));
+            base::Passed(std::unique_ptr<MemoryDumpProvider>(std::move(mdp)))));
     thread_for_unregistration.Stop();
     return true;
   };
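
The recurring mechanical change in these test hunks is the smart-pointer migration; a minimal before/after sketch, with Foo standing in for any type:

#include <memory>

#include "base/memory/ptr_util.h"

// Old spelling:
//   scoped_ptr<Foo> p = make_scoped_ptr(new Foo());
// New spelling:
std::unique_ptr<Foo> p = base::WrapUnique(new Foo());
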
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index cf221d3..f4db01a 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -56,7 +56,7 @@
 
   // Called by the MemoryDumpManager when an allocator should start or stop
   // collecting extensive allocation data, if supported.
-  virtual void OnHeapProfilingEnabled(bool /* enabled */) {}
+  virtual void OnHeapProfilingEnabled(bool) {}
 
  protected:
   MemoryDumpProvider() {}
diff --git a/base/trace_event/memory_dump_session_state.cc b/base/trace_event/memory_dump_session_state.cc
index 576da31..b3d9a8c 100644
--- a/base/trace_event/memory_dump_session_state.cc
+++ b/base/trace_event/memory_dump_session_state.cc
@@ -12,16 +12,21 @@
 MemoryDumpSessionState::~MemoryDumpSessionState() {}
 
 void MemoryDumpSessionState::SetStackFrameDeduplicator(
-    scoped_ptr<StackFrameDeduplicator> stack_frame_deduplicator) {
+    std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator) {
   DCHECK(!stack_frame_deduplicator_);
   stack_frame_deduplicator_ = std::move(stack_frame_deduplicator);
 }
 
 void MemoryDumpSessionState::SetTypeNameDeduplicator(
-    scoped_ptr<TypeNameDeduplicator> type_name_deduplicator) {
+    std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator) {
   DCHECK(!type_name_deduplicator_);
   type_name_deduplicator_ = std::move(type_name_deduplicator);
 }
 
+void MemoryDumpSessionState::SetMemoryDumpConfig(
+    const TraceConfig::MemoryDumpConfig& config) {
+  memory_dump_config_ = config;
+}
+
 }  // namespace trace_event
 }  // namespace base
diff --git a/base/trace_event/memory_dump_session_state.h b/base/trace_event/memory_dump_session_state.h
index 879545f..f199ec1 100644
--- a/base/trace_event/memory_dump_session_state.h
+++ b/base/trace_event/memory_dump_session_state.h
@@ -5,10 +5,12 @@
 #ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
 #define BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
 
+#include <memory>
+
 #include "base/base_export.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/trace_config.h"
 
 namespace base {
 namespace trace_event {
@@ -27,7 +29,7 @@
   }
 
   void SetStackFrameDeduplicator(
-      scoped_ptr<StackFrameDeduplicator> stack_frame_deduplicator);
+      std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator);
 
   // Returns the type name deduplicator that should be used by memory dump
   // providers when doing a heap dump.
@@ -36,7 +38,13 @@
   }
 
   void SetTypeNameDeduplicator(
-      scoped_ptr<TypeNameDeduplicator> type_name_deduplicator);
+      std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator);
+
+  const TraceConfig::MemoryDumpConfig& memory_dump_config() const {
+    return memory_dump_config_;
+  }
+
+  void SetMemoryDumpConfig(const TraceConfig::MemoryDumpConfig& config);
 
  private:
   friend class RefCountedThreadSafe<MemoryDumpSessionState>;
@@ -44,11 +52,15 @@
 
   // Deduplicates backtraces in heap dumps so they can be written once when the
   // trace is finalized.
-  scoped_ptr<StackFrameDeduplicator> stack_frame_deduplicator_;
+  std::unique_ptr<StackFrameDeduplicator> stack_frame_deduplicator_;
 
   // Deduplicates type names in heap dumps so they can be written once when the
   // trace is finalized.
-  scoped_ptr<TypeNameDeduplicator> type_name_deduplicator_;
+  std::unique_ptr<TypeNameDeduplicator> type_name_deduplicator_;
+
+  // The memory dump config, copied at the time when the tracing session was
+  // started.
+  TraceConfig::MemoryDumpConfig memory_dump_config_;
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index 74cbcc2..52eccbe 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -5,9 +5,13 @@
 #include "base/trace_event/process_memory_dump.h"
 
 #include <errno.h>
+
 #include <vector>
 
+#include "base/memory/ptr_util.h"
 #include "base/process/process_metrics.h"
+#include "base/strings/stringprintf.h"
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
 #include "base/trace_event/process_memory_totals.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
@@ -82,12 +86,12 @@
   size_t max_vec_size =
       GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
 #if defined(OS_MACOSX) || defined(OS_IOS)
-  scoped_ptr<char[]> vec(new char[max_vec_size]);
+  std::unique_ptr<char[]> vec(new char[max_vec_size]);
 #elif defined(OS_WIN)
-  scoped_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
+  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
       new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
 #elif defined(OS_POSIX)
-  scoped_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
+  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
 #endif
 
   while (offset < mapped_size) {
@@ -144,32 +148,34 @@
 #endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
 
 ProcessMemoryDump::ProcessMemoryDump(
-    const scoped_refptr<MemoryDumpSessionState>& session_state)
+    scoped_refptr<MemoryDumpSessionState> session_state)
     : has_process_totals_(false),
       has_process_mmaps_(false),
-      session_state_(session_state) {}
+      session_state_(std::move(session_state)) {}
 
 ProcessMemoryDump::~ProcessMemoryDump() {}
 
 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
     const std::string& absolute_name) {
   return AddAllocatorDumpInternal(
-      make_scoped_ptr(new MemoryAllocatorDump(absolute_name, this)));
+      WrapUnique(new MemoryAllocatorDump(absolute_name, this)));
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
     const std::string& absolute_name,
     const MemoryAllocatorDumpGuid& guid) {
   return AddAllocatorDumpInternal(
-      make_scoped_ptr(new MemoryAllocatorDump(absolute_name, this, guid)));
+      WrapUnique(new MemoryAllocatorDump(absolute_name, this, guid)));
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
-    scoped_ptr<MemoryAllocatorDump> mad) {
+    std::unique_ptr<MemoryAllocatorDump> mad) {
   auto insertion_result = allocator_dumps_.insert(
       std::make_pair(mad->absolute_name(), std::move(mad)));
-  DCHECK(insertion_result.second) << "Duplicate name: " << mad->absolute_name();
-  return insertion_result.first->second.get();
+  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
+  DCHECK(insertion_result.second) << "Duplicate name: "
+                                  << inserted_mad->absolute_name();
+  return inserted_mad;
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
@@ -214,11 +220,27 @@
 }
 
 void ProcessMemoryDump::AddHeapDump(const std::string& absolute_name,
-                                    scoped_ptr<TracedValue> heap_dump) {
+                                    std::unique_ptr<TracedValue> heap_dump) {
   DCHECK_EQ(0ul, heap_dumps_.count(absolute_name));
   heap_dumps_[absolute_name] = std::move(heap_dump);
 }
 
+void ProcessMemoryDump::DumpHeapUsage(
+    const base::hash_map<base::trace_event::AllocationContext,
+        base::trace_event::AllocationMetrics>& metrics_by_context,
+    base::trace_event::TraceEventMemoryOverhead& overhead,
+    const char* allocator_name) {
+  if (!metrics_by_context.empty()) {
+    std::unique_ptr<TracedValue> heap_dump = ExportHeapDump(
+        metrics_by_context, *session_state());
+    AddHeapDump(allocator_name, std::move(heap_dump));
+  }
+
+  std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
+                                             allocator_name);
+  overhead.DumpInto(base_name.c_str(), this);
+}
+
 void ProcessMemoryDump::Clear() {
   if (has_process_totals_) {
     process_totals_.Clear();
diff --git a/base/trace_event/process_memory_dump.h b/base/trace_event/process_memory_dump.h
index 37c0aa1..51e4b5f 100644
--- a/base/trace_event/process_memory_dump.h
+++ b/base/trace_event/process_memory_dump.h
@@ -48,9 +48,10 @@
   // Maps allocator dumps absolute names (allocator_name/heap/subheap) to
   // MemoryAllocatorDump instances.
   using AllocatorDumpsMap =
-      std::unordered_map<std::string, scoped_ptr<MemoryAllocatorDump>>;
+      std::unordered_map<std::string, std::unique_ptr<MemoryAllocatorDump>>;
 
-  using HeapDumpsMap = std::unordered_map<std::string, scoped_ptr<TracedValue>>;
+  using HeapDumpsMap =
+      std::unordered_map<std::string, std::unique_ptr<TracedValue>>;
 
 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
   // Returns the number of bytes in a kernel memory page. Some platforms may
@@ -66,7 +67,7 @@
   static size_t CountResidentBytes(void* start_address, size_t mapped_size);
 #endif
 
-  ProcessMemoryDump(const scoped_refptr<MemoryDumpSessionState>& session_state);
+  ProcessMemoryDump(scoped_refptr<MemoryDumpSessionState> session_state);
   ~ProcessMemoryDump();
 
   // Creates a new MemoryAllocatorDump with the given name and returns the
@@ -118,8 +119,17 @@
   // Adds a heap dump for the allocator with |absolute_name|. The |TracedValue|
   // must have the correct format. |trace_event::HeapDumper| will generate such
   // a value from a |trace_event::AllocationRegister|.
+  // TODO(bashi): Remove this when WebMemoryDumpProvider is gone.
+  // http://crbug.com/605822
   void AddHeapDump(const std::string& absolute_name,
-                   scoped_ptr<TracedValue> heap_dump);
+                   std::unique_ptr<TracedValue> heap_dump);
+
+  // Dumps heap usage with |allocator_name|.
+  void DumpHeapUsage(const base::hash_map<base::trace_event::AllocationContext,
+                                          base::trace_event::AllocationMetrics>&
+                         metrics_by_context,
+                     base::trace_event::TraceEventMemoryOverhead& overhead,
+                     const char* allocator_name);
 
   // Adds an ownership relationship between two MemoryAllocatorDump(s) with the
   // semantics: |source| owns |target|, and has the effect of attributing
@@ -175,7 +185,7 @@
 
  private:
   MemoryAllocatorDump* AddAllocatorDumpInternal(
-      scoped_ptr<MemoryAllocatorDump> mad);
+      std::unique_ptr<MemoryAllocatorDump> mad);
 
   ProcessMemoryTotals process_totals_;
   bool has_process_totals_;
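
A rough sketch of how a provider might drive the new DumpHeapUsage() from its OnMemoryDump(); |MyDumpProvider|, the allocator name, and the way |metrics_by_context| gets populated are assumptions for illustration, not defined by this patch:

bool MyDumpProvider::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                                  base::trace_event::ProcessMemoryDump* pmd) {
  // Hypothetical: metrics gathered by the provider's own allocation hooks.
  base::hash_map<base::trace_event::AllocationContext,
                 base::trace_event::AllocationMetrics> metrics_by_context;
  base::trace_event::TraceEventMemoryOverhead overhead;
  // Adds a heap dump for "my_allocator" (when the map is non-empty) and
  // accounts the profiler's own overhead under tracing/heap_profiler_my_allocator.
  pmd->DumpHeapUsage(metrics_by_context, overhead, "my_allocator");
  return true;
}
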
diff --git a/base/trace_event/process_memory_dump_unittest.cc b/base/trace_event/process_memory_dump_unittest.cc
index e7fe960..3a93b2c 100644
--- a/base/trace_event/process_memory_dump_unittest.cc
+++ b/base/trace_event/process_memory_dump_unittest.cc
@@ -23,7 +23,7 @@
 }  // namespace
 
 TEST(ProcessMemoryDumpTest, Clear) {
-  scoped_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
+  std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
   pmd1->CreateAllocatorDump("mad1");
   pmd1->CreateAllocatorDump("mad2");
   ASSERT_FALSE(pmd1->allocator_dumps().empty());
@@ -54,7 +54,7 @@
   ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
 
   // Check that calling AsValueInto() doesn't cause a crash.
-  scoped_ptr<TracedValue> traced_value(new TracedValue);
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
   pmd1->AsValueInto(traced_value.get());
 
   // Check that the pmd can be reused and behaves as expected.
@@ -79,11 +79,11 @@
 }
 
 TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
-  scoped_ptr<TracedValue> traced_value(new TracedValue);
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
   TracedValue* heap_dumps_ptr[4];
-  scoped_ptr<TracedValue> heap_dump;
+  std::unique_ptr<TracedValue> heap_dump;
 
-  scoped_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
+  std::unique_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
   auto mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
   auto mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
   pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
@@ -94,7 +94,7 @@
   heap_dumps_ptr[1] = heap_dump.get();
   pmd1->AddHeapDump("pmd1/heap_dump2", std::move(heap_dump));
 
-  scoped_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(nullptr));
+  std::unique_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(nullptr));
   auto mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
   auto mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
   pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
@@ -154,7 +154,7 @@
 }
 
 TEST(ProcessMemoryDumpTest, Suballocations) {
-  scoped_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+  std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
   const std::string allocator_dump_name = "fakealloc/allocated_objects";
   pmd->CreateAllocatorDump(allocator_dump_name);
 
@@ -191,14 +191,14 @@
   ASSERT_TRUE(found_edge[1]);
 
   // Check that calling AsValueInto() doesn't cause a crash.
-  scoped_ptr<TracedValue> traced_value(new TracedValue);
+  std::unique_ptr<TracedValue> traced_value(new TracedValue);
   pmd->AsValueInto(traced_value.get());
 
   pmd.reset();
 }
 
 TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
-  scoped_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+  std::unique_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
   MemoryAllocatorDumpGuid shared_mad_guid(1);
   auto shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
   ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
@@ -227,7 +227,7 @@
 
   // Allocate few page of dirty memory and check if it is resident.
   const size_t size1 = 5 * page_size;
-  scoped_ptr<char, base::AlignedFreeDeleter> memory1(
+  std::unique_ptr<char, base::AlignedFreeDeleter> memory1(
       static_cast<char*>(base::AlignedAlloc(size1, page_size)));
   memset(memory1.get(), 0, size1);
   size_t res1 = ProcessMemoryDump::CountResidentBytes(memory1.get(), size1);
@@ -235,7 +235,7 @@
 
   // Allocate a large memory segment (> 8Mib).
   const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
-  scoped_ptr<char, base::AlignedFreeDeleter> memory2(
+  std::unique_ptr<char, base::AlignedFreeDeleter> memory2(
       static_cast<char*>(base::AlignedAlloc(kVeryLargeMemorySize, page_size)));
   memset(memory2.get(), 0, kVeryLargeMemorySize);
   size_t res2 = ProcessMemoryDump::CountResidentBytes(memory2.get(),
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
index 9630a7a..d40f430 100644
--- a/base/trace_event/trace_buffer.cc
+++ b/base/trace_event/trace_buffer.cc
@@ -4,11 +4,12 @@
 
 #include "base/trace_event/trace_buffer.h"
 
+#include <memory>
 #include <utility>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/trace_event_impl.h"
 
 namespace base {
@@ -30,7 +31,9 @@
       recyclable_chunks_queue_[i] = i;
   }
 
-  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    HEAP_PROFILER_SCOPED_IGNORE;
+
     // Because the number of threads is much less than the number of chunks,
     // the queue should never be empty.
     DCHECK(!QueueIsEmpty());
@@ -49,10 +52,11 @@
     else
       chunk = new TraceBufferChunk(current_chunk_seq_++);
 
-    return scoped_ptr<TraceBufferChunk>(chunk);
+    return std::unique_ptr<TraceBufferChunk>(chunk);
   }
 
-  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+  void ReturnChunk(size_t index,
+                   std::unique_ptr<TraceBufferChunk> chunk) override {
     // When this method is called, the queue should not be full because it
     // can contain all chunks including the one to be returned.
     DCHECK(!QueueIsFull());
@@ -135,9 +139,9 @@
   }
 
   size_t max_chunks_;
-  std::vector<scoped_ptr<TraceBufferChunk>> chunks_;
+  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
 
-  scoped_ptr<size_t[]> recyclable_chunks_queue_;
+  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
   size_t queue_head_;
   size_t queue_tail_;
 
@@ -156,7 +160,9 @@
     chunks_.reserve(max_chunks_);
   }
 
-  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    HEAP_PROFILER_SCOPED_IGNORE;
+
     // This function may be called when adding normal events or indirectly from
     // AddMetadataEventsWhileLocked(). We can not DECHECK(!IsFull()) because we
     // have to add the metadata events and flush thread-local buffers even if
@@ -165,11 +171,12 @@
     chunks_.push_back(NULL);  // Put NULL in the slot of a in-flight chunk.
     ++in_flight_chunk_count_;
     // + 1 because zero chunk_seq is not allowed.
-    return scoped_ptr<TraceBufferChunk>(
+    return std::unique_ptr<TraceBufferChunk>(
         new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
   }
 
-  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+  void ReturnChunk(size_t index,
+                   std::unique_ptr<TraceBufferChunk> chunk) override {
     DCHECK_GT(in_flight_chunk_count_, 0u);
     DCHECK_LT(index, chunks_.size());
     DCHECK(!chunks_[index]);
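
The HEAP_PROFILER_SCOPED_IGNORE lines added to both GetChunk() implementations are a scoped guard; a minimal sketch of the pattern, assuming the macro's usual keep-out-of-the-heap-profile semantics (the free function here is hypothetical):

#include <stdint.h>

#include <memory>

#include "base/trace_event/heap_profiler.h"

std::unique_ptr<TraceBufferChunk> AllocateChunkUnprofiled(uint32_t seq) {
  // Allocations made while this scope is active are not attributed by the heap
  // profiler, so the trace buffer does not show up in its own heap dumps.
  HEAP_PROFILER_SCOPED_IGNORE;
  return std::unique_ptr<TraceBufferChunk>(new TraceBufferChunk(seq));
}
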
diff --git a/base/trace_event/trace_buffer.h b/base/trace_event/trace_buffer.h
index c4c1c2b..4885a3c 100644
--- a/base/trace_event/trace_buffer.h
+++ b/base/trace_event/trace_buffer.h
@@ -49,7 +49,7 @@
 
  private:
   size_t next_free_;
-  scoped_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
+  std::unique_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
   TraceEvent chunk_[kTraceBufferChunkSize];
   uint32_t seq_;
 };
@@ -59,9 +59,9 @@
  public:
   virtual ~TraceBuffer() {}
 
-  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
+  virtual std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
   virtual void ReturnChunk(size_t index,
-                           scoped_ptr<TraceBufferChunk> chunk) = 0;
+                           std::unique_ptr<TraceBufferChunk> chunk) = 0;
 
   virtual bool IsFull() const = 0;
   virtual size_t Size() const = 0;
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index d60c081..25a0cd6 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -10,6 +10,7 @@
 
 #include "base/json/json_reader.h"
 #include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
 #include "base/strings/pattern.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_tokenizer.h"
@@ -49,12 +50,14 @@
 const char kTriggersParam[] = "triggers";
 const char kPeriodicIntervalParam[] = "periodic_interval_ms";
 const char kModeParam[] = "mode";
+const char kHeapProfilerOptions[] = "heap_profiler_options";
+const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
 
 // Default configuration of memory dumps.
-const TraceConfig::MemoryDumpTriggerConfig kDefaultHeavyMemoryDumpTrigger = {
+const TraceConfig::MemoryDumpConfig::Trigger kDefaultHeavyMemoryDumpTrigger = {
     2000,  // periodic_interval_ms
     MemoryDumpLevelOfDetail::DETAILED};
-const TraceConfig::MemoryDumpTriggerConfig kDefaultLightMemoryDumpTrigger = {
+const TraceConfig::MemoryDumpConfig::Trigger kDefaultLightMemoryDumpTrigger = {
     250,  // periodic_interval_ms
     MemoryDumpLevelOfDetail::LIGHT};
 
@@ -74,6 +77,26 @@
 
 }  // namespace
 
+
+TraceConfig::MemoryDumpConfig::HeapProfiler::HeapProfiler() :
+    breakdown_threshold_bytes(kDefaultBreakdownThresholdBytes) {};
+
+void TraceConfig::MemoryDumpConfig::HeapProfiler::Clear() {
+  breakdown_threshold_bytes = kDefaultBreakdownThresholdBytes;
+}
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig() {};
+
+TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
+    const MemoryDumpConfig& other) = default;
+
+TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() {};
+
+void TraceConfig::MemoryDumpConfig::Clear() {
+  triggers.clear();
+  heap_profiler_options.Clear();
+}
+
 TraceConfig::TraceConfig() {
   InitializeDefault();
 }
@@ -105,6 +128,10 @@
   InitializeFromStrings(category_filter_string, trace_options_string);
 }
 
+TraceConfig::TraceConfig(const DictionaryValue& config) {
+  InitializeFromConfigDict(config);
+}
+
 TraceConfig::TraceConfig(const std::string& config_string) {
   if (!config_string.empty())
     InitializeFromConfigString(config_string);
@@ -156,9 +183,9 @@
   return json;
 }
 
-scoped_ptr<ConvertableToTraceFormat> TraceConfig::AsConvertableToTraceFormat()
-    const {
-  return make_scoped_ptr(new ConvertableTraceConfigToTraceFormat(*this));
+std::unique_ptr<ConvertableToTraceFormat>
+TraceConfig::AsConvertableToTraceFormat() const {
+  return WrapUnique(new ConvertableTraceConfigToTraceFormat(*this));
 }
 
 std::string TraceConfig::ToCategoryFilterString() const {
@@ -252,9 +279,9 @@
     included_categories_.clear();
   }
 
-  memory_dump_config_.insert(memory_dump_config_.end(),
-                             config.memory_dump_config_.begin(),
-                             config.memory_dump_config_.end());
+  memory_dump_config_.triggers.insert(memory_dump_config_.triggers.end(),
+                             config.memory_dump_config_.triggers.begin(),
+                             config.memory_dump_config_.triggers.end());
 
   disabled_categories_.insert(disabled_categories_.end(),
                               config.disabled_categories_.begin(),
@@ -276,7 +303,7 @@
   disabled_categories_.clear();
   excluded_categories_.clear();
   synthetic_delays_.clear();
-  memory_dump_config_.clear();
+  memory_dump_config_.Clear();
 }
 
 void TraceConfig::InitializeDefault() {
@@ -288,18 +315,10 @@
   excluded_categories_.push_back("*Test");
 }
 
-void TraceConfig::InitializeFromConfigString(const std::string& config_string) {
-  scoped_ptr<base::Value> value(base::JSONReader::Read(config_string));
-  if (!value || !value->IsType(base::Value::TYPE_DICTIONARY)) {
-    InitializeDefault();
-    return;
-  }
-  scoped_ptr<base::DictionaryValue> dict(
-        static_cast<base::DictionaryValue*>(value.release()));
-
+void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
   record_mode_ = RECORD_UNTIL_FULL;
   std::string record_mode;
-  if (dict->GetString(kRecordModeParam, &record_mode)) {
+  if (dict.GetString(kRecordModeParam, &record_mode)) {
     if (record_mode == kRecordUntilFull) {
       record_mode_ = RECORD_UNTIL_FULL;
     } else if (record_mode == kRecordContinuously) {
@@ -312,42 +331,57 @@
   }
 
   bool enable_sampling;
-  if (!dict->GetBoolean(kEnableSamplingParam, &enable_sampling))
+  if (!dict.GetBoolean(kEnableSamplingParam, &enable_sampling))
     enable_sampling_ = false;
   else
     enable_sampling_ = enable_sampling;
 
   bool enable_systrace;
-  if (!dict->GetBoolean(kEnableSystraceParam, &enable_systrace))
+  if (!dict.GetBoolean(kEnableSystraceParam, &enable_systrace))
     enable_systrace_ = false;
   else
     enable_systrace_ = enable_systrace;
 
   bool enable_argument_filter;
-  if (!dict->GetBoolean(kEnableArgumentFilterParam, &enable_argument_filter))
+  if (!dict.GetBoolean(kEnableArgumentFilterParam, &enable_argument_filter))
     enable_argument_filter_ = false;
   else
     enable_argument_filter_ = enable_argument_filter;
 
-  base::ListValue* category_list = nullptr;
-  if (dict->GetList(kIncludedCategoriesParam, &category_list))
+  const base::ListValue* category_list = nullptr;
+  if (dict.GetList(kIncludedCategoriesParam, &category_list))
     SetCategoriesFromIncludedList(*category_list);
-  if (dict->GetList(kExcludedCategoriesParam, &category_list))
+  if (dict.GetList(kExcludedCategoriesParam, &category_list))
     SetCategoriesFromExcludedList(*category_list);
-  if (dict->GetList(kSyntheticDelaysParam, &category_list))
+  if (dict.GetList(kSyntheticDelaysParam, &category_list))
     SetSyntheticDelaysFromList(*category_list);
 
   if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
     // If dump triggers not set, the client is using the legacy with just
     // category enabled. So, use the default periodic dump config.
-    base::DictionaryValue* memory_dump_config = nullptr;
-    if (dict->GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
+    const base::DictionaryValue* memory_dump_config = nullptr;
+    if (dict.GetDictionary(kMemoryDumpConfigParam, &memory_dump_config))
       SetMemoryDumpConfig(*memory_dump_config);
     else
       SetDefaultMemoryDumpConfig();
   }
 }
 
+void TraceConfig::InitializeFromConfigString(const std::string& config_string) {
+  std::unique_ptr<Value> value(JSONReader::Read(config_string));
+  if (!value)
+    return InitializeDefault();
+
+  const DictionaryValue* dict = nullptr;
+  bool is_dict = value->GetAsDictionary(&dict);
+
+  if (!is_dict)
+    return InitializeDefault();
+
+  DCHECK(dict);
+  InitializeFromConfigDict(*dict);
+}
+
 void TraceConfig::InitializeFromStrings(
     const std::string& category_filter_string,
     const std::string& trace_options_string) {
@@ -465,7 +499,7 @@
   if (categories.empty())
     return;
 
-  scoped_ptr<base::ListValue> list(new base::ListValue());
+  std::unique_ptr<base::ListValue> list(new base::ListValue());
   for (StringList::const_iterator ci = categories.begin();
        ci != categories.end();
        ++ci) {
@@ -477,39 +511,54 @@
 
 void TraceConfig::SetMemoryDumpConfig(
     const base::DictionaryValue& memory_dump_config) {
-  memory_dump_config_.clear();
+  // Set triggers
+  memory_dump_config_.triggers.clear();
 
   const base::ListValue* trigger_list = nullptr;
-  if (!memory_dump_config.GetList(kTriggersParam, &trigger_list) ||
-      trigger_list->GetSize() == 0) {
-    return;
+  if (memory_dump_config.GetList(kTriggersParam, &trigger_list) &&
+      trigger_list->GetSize() > 0) {
+    for (size_t i = 0; i < trigger_list->GetSize(); ++i) {
+      const base::DictionaryValue* trigger = nullptr;
+      if (!trigger_list->GetDictionary(i, &trigger))
+        continue;
+
+      MemoryDumpConfig::Trigger dump_config;
+      int interval = 0;
+
+      if (!trigger->GetInteger(kPeriodicIntervalParam, &interval)) {
+        continue;
+      }
+      DCHECK_GT(interval, 0);
+      dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
+      std::string level_of_detail_str;
+      trigger->GetString(kModeParam, &level_of_detail_str);
+      dump_config.level_of_detail =
+          StringToMemoryDumpLevelOfDetail(level_of_detail_str);
+      memory_dump_config_.triggers.push_back(dump_config);
+    }
   }
 
-  for (size_t i = 0; i < trigger_list->GetSize(); ++i) {
-    const base::DictionaryValue* trigger = nullptr;
-    if (!trigger_list->GetDictionary(i, &trigger))
-      continue;
-
-    MemoryDumpTriggerConfig dump_config;
-    int interval = 0;
-
-    if (!trigger->GetInteger(kPeriodicIntervalParam, &interval)) {
-      continue;
+  // Set heap profiler options
+  const base::DictionaryValue* heap_profiler_options = nullptr;
+  if (memory_dump_config.GetDictionary(kHeapProfilerOptions,
+                                       &heap_profiler_options)) {
+    int min_size_bytes = 0;
+    if (heap_profiler_options->GetInteger(kBreakdownThresholdBytes,
+                                         &min_size_bytes)
+        && min_size_bytes >= 0) {
+      memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
+          static_cast<size_t>(min_size_bytes);
+    } else {
+      memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
+          MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes;
     }
-    DCHECK_GT(interval, 0);
-    dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
-    std::string level_of_detail_str;
-    trigger->GetString(kModeParam, &level_of_detail_str);
-    dump_config.level_of_detail =
-        StringToMemoryDumpLevelOfDetail(level_of_detail_str);
-    memory_dump_config_.push_back(dump_config);
   }
 }
 
 void TraceConfig::SetDefaultMemoryDumpConfig() {
-  memory_dump_config_.clear();
-  memory_dump_config_.push_back(kDefaultHeavyMemoryDumpTrigger);
-  memory_dump_config_.push_back(kDefaultLightMemoryDumpTrigger);
+  memory_dump_config_.Clear();
+  memory_dump_config_.triggers.push_back(kDefaultHeavyMemoryDumpTrigger);
+  memory_dump_config_.triggers.push_back(kDefaultLightMemoryDumpTrigger);
 }
 
 void TraceConfig::ToDict(base::DictionaryValue& dict) const {
@@ -554,11 +603,12 @@
   AddCategoryToDict(dict, kSyntheticDelaysParam, synthetic_delays_);
 
   if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
-    scoped_ptr<base::DictionaryValue> memory_dump_config(
+    std::unique_ptr<base::DictionaryValue> memory_dump_config(
         new base::DictionaryValue());
-    scoped_ptr<base::ListValue> triggers_list(new base::ListValue());
-    for (const MemoryDumpTriggerConfig& config : memory_dump_config_) {
-      scoped_ptr<base::DictionaryValue> trigger_dict(
+    std::unique_ptr<base::ListValue> triggers_list(new base::ListValue());
+    for (const MemoryDumpConfig::Trigger& config
+        : memory_dump_config_.triggers) {
+      std::unique_ptr<base::DictionaryValue> trigger_dict(
           new base::DictionaryValue());
       trigger_dict->SetInteger(kPeriodicIntervalParam,
                                static_cast<int>(config.periodic_interval_ms));
@@ -570,6 +620,17 @@
     // Empty triggers will still be specified explicitly since it means that
     // the periodic dumps are not enabled.
     memory_dump_config->Set(kTriggersParam, std::move(triggers_list));
+
+    if (memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes !=
+        MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes) {
+      std::unique_ptr<base::DictionaryValue> heap_profiler_options(
+          new base::DictionaryValue());
+      heap_profiler_options->SetInteger(
+          kBreakdownThresholdBytes,
+          memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
+      memory_dump_config->Set(kHeapProfilerOptions,
+                              std::move(heap_profiler_options));
+    }
     dict.Set(kMemoryDumpConfigParam, std::move(memory_dump_config));
   }
 }
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index 81b7d51..5b119ea 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -40,14 +40,38 @@
  public:
   typedef std::vector<std::string> StringList;
 
-  // Specifies the memory dump config for tracing. Used only when
-  // "memory-infra" category is enabled.
-  struct MemoryDumpTriggerConfig {
-    uint32_t periodic_interval_ms;
-    MemoryDumpLevelOfDetail level_of_detail;
-  };
+  // Specifies the memory dump config for tracing.
+  // Used only when "memory-infra" category is enabled.
+  struct MemoryDumpConfig {
+    MemoryDumpConfig();
+    MemoryDumpConfig(const MemoryDumpConfig& other);
+    ~MemoryDumpConfig();
 
-  typedef std::vector<MemoryDumpTriggerConfig> MemoryDumpConfig;
+    // Specifies the triggers in the memory dump config.
+    struct Trigger {
+      uint32_t periodic_interval_ms;
+      MemoryDumpLevelOfDetail level_of_detail;
+    };
+
+    // Specifies the configuration options for the heap profiler.
+    struct HeapProfiler {
+      // Default value for |breakdown_threshold_bytes|.
+      enum { kDefaultBreakdownThresholdBytes = 1024 };
+
+      HeapProfiler();
+
+      // Reset the options to default.
+      void Clear();
+
+      uint32_t breakdown_threshold_bytes;
+    };
+
+    // Reset the values in the config.
+    void Clear();
+
+    std::vector<Trigger> triggers;
+    HeapProfiler heap_profiler_options;
+  };
 
   TraceConfig();
 
@@ -130,6 +154,10 @@
   // disabled-by-default-memory-infra category is enabled.
   explicit TraceConfig(const std::string& config_string);
 
+  // Functionally identical to the above, but takes a parsed dictionary as input
+  // instead of its JSON serialization.
+  explicit TraceConfig(const DictionaryValue& config);
+
   TraceConfig(const TraceConfig& tc);
 
   ~TraceConfig();
@@ -154,7 +182,7 @@
   std::string ToString() const;
 
   // Returns a copy of the TraceConfig wrapped in a ConvertableToTraceFormat
-  scoped_ptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
+  std::unique_ptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
 
   // Write the string representation of the CategoryFilter part.
   std::string ToCategoryFilterString() const;
@@ -190,7 +218,10 @@
   // in the suffix 'Debug' or 'Test'.
   void InitializeDefault();
 
-  // Initialize from the config string
+  // Initialize from a config dictionary.
+  void InitializeFromConfigDict(const DictionaryValue& dict);
+
+  // Initialize from a config string.
   void InitializeFromConfigString(const std::string& config_string);
 
   // Initialize from category filter and trace options strings
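
To make the reworked config concrete, a small sketch of populating the new MemoryDumpConfig by hand, mirroring what SetMemoryDumpConfig() parses out of the "memory_dump_config" JSON (the values are illustrative):

#include "base/trace_event/trace_config.h"

using base::trace_event::MemoryDumpLevelOfDetail;
using base::trace_event::TraceConfig;

TraceConfig::MemoryDumpConfig MakeLightDumpConfig() {
  TraceConfig::MemoryDumpConfig config;
  TraceConfig::MemoryDumpConfig::Trigger light_trigger = {
      250 /* periodic_interval_ms */, MemoryDumpLevelOfDetail::LIGHT};
  config.triggers.push_back(light_trigger);
  config.heap_profiler_options.breakdown_threshold_bytes = 2048;
  // config.Clear() would drop the trigger and restore the 1024-byte
  // kDefaultBreakdownThresholdBytes.
  return config;
}
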
diff --git a/base/trace_event/trace_config_memory_test_util.h b/base/trace_event/trace_config_memory_test_util.h
index 8d8206f..1acc62b 100644
--- a/base/trace_event/trace_config_memory_test_util.h
+++ b/base/trace_event/trace_config_memory_test_util.h
@@ -24,7 +24,10 @@
             "\"%s\""
           "],"
           "\"memory_dump_config\":{"
-            "\"triggers\":["
+             "\"heap_profiler_options\":{"
+               "\"breakdown_threshold_bytes\":2048"
+             "},"
+             "\"triggers\":["
               "{"
                 "\"mode\":\"light\","
                 "\"periodic_interval_ms\":%d"
diff --git a/base/trace_event/trace_config_unittest.cc b/base/trace_event/trace_config_unittest.cc
index bd37880..a173376 100644
--- a/base/trace_event/trace_config_unittest.cc
+++ b/base/trace_event/trace_config_unittest.cc
@@ -4,6 +4,7 @@
 
 #include <stddef.h>
 
+#include "base/json/json_reader.h"
 #include "base/macros.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/trace_config.h"
@@ -23,6 +24,30 @@
     "\"excluded_categories\":[\"*Debug\",\"*Test\"],"
     "\"record_mode\":\"record-until-full\""
   "}";
+
+const char kCustomTraceConfigString[] =
+  "{"
+    "\"enable_argument_filter\":true,"
+    "\"enable_sampling\":true,"
+    "\"enable_systrace\":true,"
+    "\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
+    "\"included_categories\":[\"included\","
+                            "\"inc_pattern*\","
+                            "\"disabled-by-default-cc\","
+                            "\"disabled-by-default-memory-infra\"],"
+    "\"memory_dump_config\":{"
+      "\"heap_profiler_options\":{"
+        "\"breakdown_threshold_bytes\":10240"
+      "},"
+      "\"triggers\":["
+        "{\"mode\":\"light\",\"periodic_interval_ms\":50},"
+        "{\"mode\":\"detailed\",\"periodic_interval_ms\":1000}"
+      "]"
+    "},"
+    "\"record_mode\":\"record-continuously\","
+    "\"synthetic_delays\":[\"test.Delay1;16\",\"test.Delay2;32\"]"
+  "}";
+
 }  // namespace
 
 TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
@@ -259,6 +284,50 @@
   EXPECT_FALSE(tc.IsCategoryGroupEnabled("CategoryDebug,CategoryTest"));
 }
 
+TEST(TraceConfigTest, TraceConfigFromDict) {
+  // Passing in an empty dictionary will not result in the default trace config.
+  DictionaryValue dict;
+  TraceConfig tc(dict);
+  EXPECT_STRNE(kDefaultTraceConfigString, tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
+  EXPECT_FALSE(tc.IsSamplingEnabled());
+  EXPECT_FALSE(tc.IsSystraceEnabled());
+  EXPECT_FALSE(tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
+
+  std::unique_ptr<Value> default_value(
+      JSONReader::Read(kDefaultTraceConfigString));
+  DCHECK(default_value);
+  const DictionaryValue* default_dict = nullptr;
+  bool is_dict = default_value->GetAsDictionary(&default_dict);
+  DCHECK(is_dict);
+  TraceConfig default_tc(*default_dict);
+  EXPECT_STREQ(kDefaultTraceConfigString, default_tc.ToString().c_str());
+  EXPECT_EQ(RECORD_UNTIL_FULL, default_tc.GetTraceRecordMode());
+  EXPECT_FALSE(default_tc.IsSamplingEnabled());
+  EXPECT_FALSE(default_tc.IsSystraceEnabled());
+  EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("-*Debug,-*Test", default_tc.ToCategoryFilterString().c_str());
+
+  std::unique_ptr<Value> custom_value(
+      JSONReader::Read(kCustomTraceConfigString));
+  DCHECK(custom_value);
+  const DictionaryValue* custom_dict = nullptr;
+  is_dict = custom_value->GetAsDictionary(&custom_dict);
+  DCHECK(is_dict);
+  TraceConfig custom_tc(*custom_dict);
+  EXPECT_STREQ(kCustomTraceConfigString, custom_tc.ToString().c_str());
+  EXPECT_EQ(RECORD_CONTINUOUSLY, custom_tc.GetTraceRecordMode());
+  EXPECT_TRUE(custom_tc.IsSamplingEnabled());
+  EXPECT_TRUE(custom_tc.IsSystraceEnabled());
+  EXPECT_TRUE(custom_tc.IsArgumentFilterEnabled());
+  EXPECT_STREQ("included,inc_pattern*,"
+               "disabled-by-default-cc,disabled-by-default-memory-infra,"
+               "-excluded,-exc_pattern*,"
+               "DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
+               custom_tc.ToCategoryFilterString().c_str());
+}
+
 TEST(TraceConfigTest, TraceConfigFromValidString) {
   // Using some non-empty config string.
   const char config_string[] =
@@ -504,15 +573,17 @@
   TraceConfig tc(tc_str);
   EXPECT_EQ(tc_str, tc.ToString());
   EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
-  EXPECT_EQ(2u, tc.memory_dump_config_.size());
+  ASSERT_EQ(2u, tc.memory_dump_config_.triggers.size());
 
-  EXPECT_EQ(200u, tc.memory_dump_config_[0].periodic_interval_ms);
+  EXPECT_EQ(200u, tc.memory_dump_config_.triggers[0].periodic_interval_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
-            tc.memory_dump_config_[0].level_of_detail);
+            tc.memory_dump_config_.triggers[0].level_of_detail);
 
-  EXPECT_EQ(2000u, tc.memory_dump_config_[1].periodic_interval_ms);
+  EXPECT_EQ(2000u, tc.memory_dump_config_.triggers[1].periodic_interval_ms);
   EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
-            tc.memory_dump_config_[1].level_of_detail);
+            tc.memory_dump_config_.triggers[1].level_of_detail);
+  EXPECT_EQ(2048u, tc.memory_dump_config_.heap_profiler_options.
+            breakdown_threshold_bytes);
 }
 
 TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
@@ -520,14 +591,22 @@
   TraceConfig tc(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers());
   EXPECT_EQ(TraceConfigMemoryTestUtil::GetTraceConfig_EmptyTriggers(),
             tc.ToString());
-  EXPECT_EQ(0u, tc.memory_dump_config_.size());
+  EXPECT_EQ(0u, tc.memory_dump_config_.triggers.size());
+  EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
+            ::kDefaultBreakdownThresholdBytes,
+            tc.memory_dump_config_.heap_profiler_options
+            .breakdown_threshold_bytes);
 }
 
 TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
   TraceConfig tc(MemoryDumpManager::kTraceCategory, "");
   EXPECT_TRUE(tc.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
   EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
-  EXPECT_EQ(2u, tc.memory_dump_config_.size());
+  EXPECT_EQ(2u, tc.memory_dump_config_.triggers.size());
+  EXPECT_EQ(TraceConfig::MemoryDumpConfig::HeapProfiler
+            ::kDefaultBreakdownThresholdBytes,
+            tc.memory_dump_config_.heap_profiler_options
+            .breakdown_threshold_bytes);
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
index d5583fa..4335ea1 100644
--- a/base/trace_event/trace_event.gypi
+++ b/base/trace_event/trace_event.gypi
@@ -4,7 +4,10 @@
 {
   'variables': {
     'trace_event_sources' : [
+      'trace_event/blame_context.cc',
+      'trace_event/blame_context.h',
       'trace_event/common/trace_event_common.h',
+      'trace_event/heap_profiler.h',
       'trace_event/heap_profiler_allocation_context.cc',
       'trace_event/heap_profiler_allocation_context.h',
       'trace_event/heap_profiler_allocation_context_tracker.cc',
@@ -67,6 +70,7 @@
       'trace_event/winheap_dump_provider_win.h',
     ],
     'trace_event_test_sources' : [
+      'trace_event/blame_context_unittest.cc',
       'trace_event/heap_profiler_allocation_context_tracker_unittest.cc',
       'trace_event/heap_profiler_allocation_register_unittest.cc',
       'trace_event/heap_profiler_heap_dump_writer_unittest.cc',
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
index 6255bc0..a075898 100644
--- a/base/trace_event/trace_event.h
+++ b/base/trace_event/trace_event.h
@@ -18,6 +18,7 @@
 #include "base/macros.h"
 #include "base/time/time.h"
 #include "base/trace_event/common/trace_event_common.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/trace_event_system_stats_monitor.h"
 #include "base/trace_event/trace_log.h"
 #include "build/build_config.h"
@@ -110,7 +111,7 @@
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    scoped_ptr<ConvertableToTraceFormat>*
+//                    std::unique_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT \
@@ -129,7 +130,7 @@
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    scoped_ptr<ConvertableToTraceFormat>*
+//                    std::unique_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID \
@@ -149,7 +150,7 @@
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    scoped_ptr<ConvertableToTraceFormat>*
+//                    std::unique_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID \
@@ -169,7 +170,7 @@
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    scoped_ptr<ConvertableToTraceFormat>*
+//                    std::unique_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP \
@@ -190,7 +191,7 @@
 //     const unsigned char* category_group_enabled,
 //     const char* event_name,
 //     const char* arg_name,
-//     scoped_ptr<ConvertableToTraceFormat> arg_value)
+//     std::unique_ptr<ConvertableToTraceFormat> arg_value)
 #define TRACE_EVENT_API_ADD_METADATA_EVENT \
     trace_event_internal::AddMetadataEvent
 
@@ -382,6 +383,15 @@
   INTERNAL_TRACE_EVENT_UID(ScopedContext)                                  \
   INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
 
+// Implementation detail: internal macro to trace a task execution with the
+// location where it was posted from.
+#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task)                 \
+  TRACE_EVENT2("toplevel", run_function, "src_file",                      \
+               (task).posted_from.file_name(), "src_func",                \
+               (task).posted_from.function_name());                       \
+  TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID( \
+      task_event)((task).posted_from.file_name());
+
 namespace trace_event_internal {
 
 // Specify these values when the corresponding argument of AddTraceEvent is not
@@ -464,9 +474,8 @@
   TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(DontMangle maybe_scoped_id, unsigned int* /* flags */)
-      : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {
-  }
+  TraceID(DontMangle maybe_scoped_id, unsigned int* /*flags*/)
+      : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {}
   TraceID(unsigned long long raw_id, unsigned int* flags) : raw_id_(raw_id) {
     (void)flags;
   }
@@ -492,7 +501,7 @@
       : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
   TraceID(signed char raw_id, unsigned int* flags)
       : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
-  TraceID(WithScope scoped_id, unsigned int* /* flags */)
+  TraceID(WithScope scoped_id, unsigned int* /*flags*/)
       : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
 
   unsigned long long raw_id() const { return raw_id_; }
@@ -624,10 +633,10 @@
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
   const int num_args = 1;
   unsigned char arg_types[1] = { TRACE_VALUE_TYPE_CONVERTABLE };
-  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
       convertable_values[1] = {std::move(arg1_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
       phase, category_group_enabled, name, scope, id, bind_id, thread_id,
@@ -650,7 +659,7 @@
     const char* arg1_name,
     const ARG1_TYPE& arg1_val,
     const char* arg2_name,
-    scoped_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
   const int num_args = 2;
   const char* arg_names[2] = { arg1_name, arg2_name };
 
@@ -658,7 +667,7 @@
   unsigned long long arg_values[2];
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
   arg_types[1] = TRACE_VALUE_TYPE_CONVERTABLE;
-  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
       convertable_values[2] = {nullptr, std::move(arg2_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
       phase, category_group_enabled, name, scope, id, bind_id, thread_id,
@@ -679,7 +688,7 @@
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
     const char* arg2_name,
     const ARG2_TYPE& arg2_val) {
   const int num_args = 2;
@@ -690,7 +699,7 @@
   arg_types[0] = TRACE_VALUE_TYPE_CONVERTABLE;
   arg_values[0] = 0;
   SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
-  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
       convertable_values[2] = {std::move(arg1_val), nullptr};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
       phase, category_group_enabled, name, scope, id, bind_id, thread_id,
@@ -711,14 +720,14 @@
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
     const char* arg2_name,
-    scoped_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
   const int num_args = 2;
   const char* arg_names[2] = { arg1_name, arg2_name };
   unsigned char arg_types[2] =
       { TRACE_VALUE_TYPE_CONVERTABLE, TRACE_VALUE_TYPE_CONVERTABLE };
-  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
       convertable_values[2] = {std::move(arg1_val), std::move(arg2_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
       phase, category_group_enabled, name, scope, id, bind_id, thread_id,
@@ -808,7 +817,7 @@
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
@@ -853,7 +862,7 @@
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
     const char* arg2_name,
     const ARG2_TYPE& arg2_val) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
@@ -875,7 +884,7 @@
     const char* arg1_name,
     const ARG1_TYPE& arg1_val,
     const char* arg2_name,
-    scoped_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
@@ -893,9 +902,9 @@
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
     const char* arg2_name,
-    scoped_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+    std::unique_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
@@ -928,10 +937,10 @@
     const unsigned char* category_group_enabled,
     const char* event_name,
     const char* arg_name,
-    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg_value) {
+    std::unique_ptr<ARG1_CONVERTABLE_TYPE> arg_value) {
   const char* arg_names[1] = {arg_name};
   unsigned char arg_types[1] = {TRACE_VALUE_TYPE_CONVERTABLE};
-  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+  std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
       convertable_values[1] = {std::move(arg_value)};
   base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
       category_group_enabled, event_name,
diff --git a/base/trace_event/trace_event_argument.cc b/base/trace_event/trace_event_argument.cc
index 6d787c8..8babf3b 100644
--- a/base/trace_event/trace_event_argument.cc
+++ b/base/trace_event/trace_event_argument.cc
@@ -10,6 +10,7 @@
 
 #include "base/bits.h"
 #include "base/json/json_writer.h"
+#include "base/memory/ptr_util.h"
 #include "base/trace_event/trace_event_memory_overhead.h"
 #include "base/values.h"
 
@@ -234,7 +235,8 @@
   pickle_.WriteBytes(&kTypeEndArray, 1);
 }
 
-void TracedValue::SetValue(const char* name, scoped_ptr<base::Value> value) {
+void TracedValue::SetValue(const char* name,
+                           std::unique_ptr<base::Value> value) {
   SetBaseValueWithCopiedName(name, *value);
 }
 
@@ -347,8 +349,8 @@
   }
 }
 
-scoped_ptr<base::Value> TracedValue::ToBaseValue() const {
-  scoped_ptr<DictionaryValue> root(new DictionaryValue);
+std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
+  std::unique_ptr<DictionaryValue> root(new DictionaryValue);
   DictionaryValue* cur_dict = root.get();
   ListValue* cur_list = nullptr;
   std::vector<Value*> stack;
@@ -362,11 +364,11 @@
         auto new_dict = new DictionaryValue();
         if (cur_dict) {
           cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
-                                            make_scoped_ptr(new_dict));
+                                            WrapUnique(new_dict));
           stack.push_back(cur_dict);
           cur_dict = new_dict;
         } else {
-          cur_list->Append(make_scoped_ptr(new_dict));
+          cur_list->Append(WrapUnique(new_dict));
           stack.push_back(cur_list);
           cur_list = nullptr;
           cur_dict = new_dict;
@@ -387,12 +389,12 @@
         auto new_list = new ListValue();
         if (cur_dict) {
           cur_dict->SetWithoutPathExpansion(ReadKeyName(it),
-                                            make_scoped_ptr(new_list));
+                                            WrapUnique(new_list));
           stack.push_back(cur_dict);
           cur_dict = nullptr;
           cur_list = new_list;
         } else {
-          cur_list->Append(make_scoped_ptr(new_list));
+          cur_list->Append(WrapUnique(new_list));
           stack.push_back(cur_list);
           cur_list = new_list;
         }
@@ -460,14 +462,11 @@
 
 void TracedValue::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
-  const size_t kPickleHeapAlign = 4096;  // Must be == Pickle::kPickleHeapAlign.
   overhead->Add("TracedValue",
-
                 /* allocated size */
-                bits::Align(pickle_.GetTotalAllocatedSize(), kPickleHeapAlign),
-
+                pickle_.GetTotalAllocatedSize(),
                 /* resident size */
-                bits::Align(pickle_.size(), kPickleHeapAlign));
+                pickle_.size());
 }
 
 }  // namespace trace_event
diff --git a/base/trace_event/trace_event_argument.h b/base/trace_event/trace_event_argument.h
index d706479..81d8c01 100644
--- a/base/trace_event/trace_event_argument.h
+++ b/base/trace_event/trace_event_argument.h
@@ -7,11 +7,11 @@
 
 #include <stddef.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/pickle.h"
 #include "base/strings/string_piece.h"
 #include "base/trace_event/trace_event_impl.h"
@@ -67,13 +67,13 @@
   // a copy-and-translation of the base::Value into the equivalent TracedValue.
   // TODO(primiano): migrate the (three) existing clients to the cheaper
   // SetValue(TracedValue) API. crbug.com/495628.
-  void SetValue(const char* name, scoped_ptr<base::Value> value);
+  void SetValue(const char* name, std::unique_ptr<base::Value> value);
   void SetBaseValueWithCopiedName(base::StringPiece name,
                                   const base::Value& value);
   void AppendBaseValue(const base::Value& value);
 
   // Public for tests only.
-  scoped_ptr<base::Value> ToBaseValue() const;
+  std::unique_ptr<base::Value> ToBaseValue() const;
 
  private:
   Pickle pickle_;
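
Illustrative aside, not part of the patch: a minimal sketch of the updated TracedValue::SetValue(), which now accepts std::unique_ptr<base::Value> and copies-and-translates it into the TracedValue, as the header comment above describes. The function name RecordStats and the argument names are hypothetical.

#include <memory>
#include <utility>

#include "base/memory/ptr_util.h"
#include "base/trace_event/trace_event_argument.h"
#include "base/values.h"

void RecordStats() {
  auto stats = base::WrapUnique(new base::DictionaryValue());
  stats->SetInteger("buffers", 3);
  stats->SetDouble("fill_ratio", 0.75);

  auto traced = base::WrapUnique(new base::trace_event::TracedValue());
  // SetValue() copies-and-translates the base::Value into the TracedValue's
  // internal pickle representation.
  traced->SetValue("stats", std::move(stats));
}
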
diff --git a/base/trace_event/trace_event_argument_unittest.cc b/base/trace_event/trace_event_argument_unittest.cc
index 644d494..61395f4 100644
--- a/base/trace_event/trace_event_argument_unittest.cc
+++ b/base/trace_event/trace_event_argument_unittest.cc
@@ -8,6 +8,7 @@
 
 #include <utility>
 
+#include "base/memory/ptr_util.h"
 #include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -15,7 +16,7 @@
 namespace trace_event {
 
 TEST(TraceEventArgumentTest, FlatDictionary) {
-  scoped_ptr<TracedValue> value(new TracedValue());
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("int", 2014);
   value->SetDouble("double", 0.0);
   value->SetBoolean("bool", true);
@@ -28,7 +29,7 @@
 }
 
 TEST(TraceEventArgumentTest, NoDotPathExpansion) {
-  scoped_ptr<TracedValue> value(new TracedValue());
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("in.t", 2014);
   value->SetDouble("doub.le", 0.0);
   value->SetBoolean("bo.ol", true);
@@ -41,7 +42,7 @@
 }
 
 TEST(TraceEventArgumentTest, Hierarchy) {
-  scoped_ptr<TracedValue> value(new TracedValue());
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("i0", 2014);
   value->BeginDictionary("dict1");
   value->SetInteger("i1", 2014);
@@ -77,7 +78,7 @@
     kLongString3[i] = 'a' + (i % 25);
   kLongString3[sizeof(kLongString3) - 1] = '\0';
 
-  scoped_ptr<TracedValue> value(new TracedValue());
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->SetString("a", "short");
   value->SetString("b", kLongString);
   value->BeginArray("c");
@@ -100,20 +101,20 @@
   FundamentalValue bool_value(true);
   FundamentalValue double_value(42.0f);
 
-  auto dict_value = make_scoped_ptr(new DictionaryValue);
+  auto dict_value = WrapUnique(new DictionaryValue);
   dict_value->SetBoolean("bool", true);
   dict_value->SetInteger("int", 42);
   dict_value->SetDouble("double", 42.0f);
   dict_value->SetString("string", std::string("a") + "b");
   dict_value->SetString("string", std::string("a") + "b");
 
-  auto list_value = make_scoped_ptr(new ListValue);
+  auto list_value = WrapUnique(new ListValue);
   list_value->AppendBoolean(false);
   list_value->AppendInteger(1);
   list_value->AppendString("in_list");
   list_value->Append(std::move(dict_value));
 
-  scoped_ptr<TracedValue> value(new TracedValue());
+  std::unique_ptr<TracedValue> value(new TracedValue());
   value->BeginDictionary("outer_dict");
   value->SetValue("inner_list", std::move(list_value));
   value->EndDictionary();
@@ -130,10 +131,10 @@
 }
 
 TEST(TraceEventArgumentTest, PassTracedValue) {
-  auto dict_value = make_scoped_ptr(new TracedValue());
+  auto dict_value = WrapUnique(new TracedValue());
   dict_value->SetInteger("a", 1);
 
-  auto nested_dict_value = make_scoped_ptr(new TracedValue());
+  auto nested_dict_value = WrapUnique(new TracedValue());
   nested_dict_value->SetInteger("b", 2);
   nested_dict_value->BeginArray("c");
   nested_dict_value->AppendString("foo");
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index c0dc843..e2e250e 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -56,7 +56,7 @@
 TraceEvent::~TraceEvent() {
 }
 
-void TraceEvent::MoveFrom(scoped_ptr<TraceEvent> other) {
+void TraceEvent::MoveFrom(std::unique_ptr<TraceEvent> other) {
   timestamp_ = other->timestamp_;
   thread_timestamp_ = other->thread_timestamp_;
   duration_ = other->duration_;
@@ -94,7 +94,7 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   timestamp_ = timestamp;
   thread_timestamp_ = thread_timestamp;
@@ -293,10 +293,10 @@
   // Category group checked at category creation time.
   DCHECK(!strchr(name_, '"'));
   StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
-                     ","
-                     "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":",
-                process_id, thread_id, time_int64, phase_, category_group_name,
-                name_);
+                     ",\"ph\":\"%c\",\"cat\":\"%s\",\"name\":",
+                process_id, thread_id, time_int64, phase_, category_group_name);
+  EscapeJSONString(name_, true, out);
+  *out += ",\"args\":";
 
   // Output argument names and values, stop at first NULL argument name.
   // TODO(oysteine): The dual predicates here is a bit ugly; if the filtering
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
index df7151a..4382217 100644
--- a/base/trace_event/trace_event_impl.h
+++ b/base/trace_event/trace_event_impl.h
@@ -8,6 +8,7 @@
 
 #include <stdint.h>
 
+#include <memory>
 #include <stack>
 #include <string>
 #include <vector>
@@ -17,7 +18,6 @@
 #include "base/callback.h"
 #include "base/containers/hash_tables.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/observer_list.h"
 #include "base/single_thread_task_runner.h"
 #include "base/strings/string_util.h"
@@ -92,7 +92,7 @@
   TraceEvent();
   ~TraceEvent();
 
-  void MoveFrom(scoped_ptr<TraceEvent> other);
+  void MoveFrom(std::unique_ptr<TraceEvent> other);
 
   void Initialize(int thread_id,
                   TimeTicks timestamp,
@@ -107,7 +107,7 @@
                   const char** arg_names,
                   const unsigned char* arg_types,
                   const unsigned long long* arg_values,
-                  scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+                  std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
                   unsigned int flags);
 
   void Reset();
@@ -163,10 +163,11 @@
   unsigned long long id_;
   TraceValue arg_values_[kTraceMaxNumArgs];
   const char* arg_names_[kTraceMaxNumArgs];
-  scoped_ptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
+  std::unique_ptr<ConvertableToTraceFormat>
+      convertable_values_[kTraceMaxNumArgs];
   const unsigned char* category_group_enabled_;
   const char* name_;
-  scoped_ptr<std::string> parameter_copy_storage_;
+  std::unique_ptr<std::string> parameter_copy_storage_;
   // Depending on TRACE_EVENT_FLAG_HAS_PROCESS_ID the event will have either:
   //  tid: thread_id_, pid: current_process_id (default case).
   //  tid: -1, pid: process_id_ (when flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID).
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index c98c698..e626a77 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -2,11 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/trace_event/trace_event.h"
+
 #include <math.h>
 #include <stddef.h>
 #include <stdint.h>
 
 #include <cstdlib>
+#include <memory>
 
 #include "base/bind.h"
 #include "base/command_line.h"
@@ -15,7 +18,6 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted_memory.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/singleton.h"
 #include "base/process/process_handle.h"
 #include "base/single_thread_task_runner.h"
@@ -27,7 +29,6 @@
 #include "base/threading/thread.h"
 #include "base/time/time.h"
 #include "base/trace_event/trace_buffer.h"
-#include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_synthetic_delay.h"
 #include "base/values.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -183,7 +184,7 @@
   trace_buffer_.AddFragment(events_str->data());
   trace_buffer_.Finish();
 
-  scoped_ptr<Value> root = base::JSONReader::Read(
+  std::unique_ptr<Value> root = base::JSONReader::Read(
       json_output_.json_output, JSON_PARSE_RFC | JSON_DETACHABLE_CHILDREN);
 
   if (!root.get()) {
@@ -196,7 +197,7 @@
 
   // Move items into our aggregate collection
   while (root_list->GetSize()) {
-    scoped_ptr<Value> item;
+    std::unique_ptr<Value> item;
     root_list->Remove(0, &item);
     trace_parsed_.Append(item.release());
   }
@@ -266,15 +267,15 @@
 }
 
 void TraceEventTestFixture::DropTracedMetadataRecords() {
-  scoped_ptr<ListValue> old_trace_parsed(trace_parsed_.DeepCopy());
+  std::unique_ptr<ListValue> old_trace_parsed(trace_parsed_.CreateDeepCopy());
   size_t old_trace_parsed_size = old_trace_parsed->GetSize();
   trace_parsed_.Clear();
 
   for (size_t i = 0; i < old_trace_parsed_size; i++) {
-    Value* value = NULL;
+    Value* value = nullptr;
     old_trace_parsed->Get(i, &value);
     if (!value || value->GetType() != Value::TYPE_DICTIONARY) {
-      trace_parsed_.Append(value->DeepCopy());
+      trace_parsed_.Append(value->CreateDeepCopy());
       continue;
     }
     DictionaryValue* dict = static_cast<DictionaryValue*>(value);
@@ -282,7 +283,7 @@
     if (dict->GetString("ph", &tmp) && tmp == "M")
       continue;
 
-    trace_parsed_.Append(value->DeepCopy());
+    trace_parsed_.Append(value->CreateDeepCopy());
   }
 }
 
@@ -1247,8 +1248,8 @@
     int* num_calls_;
   };
 
-  scoped_ptr<ConvertableToTraceFormat> conv1(new Convertable(&num_calls));
-  scoped_ptr<Convertable> conv2(new Convertable(&num_calls));
+  std::unique_ptr<ConvertableToTraceFormat> conv1(new Convertable(&num_calls));
+  std::unique_ptr<Convertable> conv2(new Convertable(&num_calls));
 
   BeginTrace();
   TRACE_EVENT_API_ADD_METADATA_EVENT(
@@ -2054,19 +2055,20 @@
   TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
                                       TraceLog::RECORDING_MODE);
 
-  scoped_ptr<ConvertableToTraceFormat> data(new MyData());
-  scoped_ptr<ConvertableToTraceFormat> data1(new MyData());
-  scoped_ptr<ConvertableToTraceFormat> data2(new MyData());
+  std::unique_ptr<ConvertableToTraceFormat> data(new MyData());
+  std::unique_ptr<ConvertableToTraceFormat> data1(new MyData());
+  std::unique_ptr<ConvertableToTraceFormat> data2(new MyData());
   TRACE_EVENT1("foo", "bar", "data", std::move(data));
   TRACE_EVENT2("foo", "baz", "data1", std::move(data1), "data2",
                std::move(data2));
 
-  // Check that scoped_ptr<DerivedClassOfConvertable> are properly treated as
+  // Check that std::unique_ptr<DerivedClassOfConvertable> are properly
+  // treated as
   // convertable and not accidentally casted to bool.
-  scoped_ptr<MyData> convertData1(new MyData());
-  scoped_ptr<MyData> convertData2(new MyData());
-  scoped_ptr<MyData> convertData3(new MyData());
-  scoped_ptr<MyData> convertData4(new MyData());
+  std::unique_ptr<MyData> convertData1(new MyData());
+  std::unique_ptr<MyData> convertData2(new MyData());
+  std::unique_ptr<MyData> convertData3(new MyData());
+  std::unique_ptr<MyData> convertData4(new MyData());
   TRACE_EVENT2("foo", "string_first", "str", "string value 1", "convert",
                std::move(convertData1));
   TRACE_EVENT2("foo", "string_second", "convert", std::move(convertData2),
@@ -2328,6 +2330,16 @@
   EXPECT_EQ(1, int_value);
 }
 
+TEST_F(TraceEventTestFixture, NameIsEscaped) {
+  TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+                                      TraceLog::RECORDING_MODE);
+  TRACE_EVENT0("category", "name\\with\\backspaces");
+  EndTraceAndFlush();
+
+  EXPECT_TRUE(FindMatchingValue("cat", "category"));
+  EXPECT_TRUE(FindMatchingValue("name", "name\\with\\backspaces"));
+}
+
 namespace {
 
 bool IsArgNameWhitelisted(const char* arg_name) {
@@ -2686,7 +2698,8 @@
   size_t chunk_index;
   EXPECT_EQ(0u, buffer->Size());
 
-  scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[num_chunks]);
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[num_chunks]);
   for (size_t i = 0; i < num_chunks; ++i) {
     chunks[i] = buffer->GetChunk(&chunk_index).release();
     EXPECT_TRUE(chunks[i]);
@@ -2702,7 +2715,7 @@
 
   // Return all chunks in original order.
   for (size_t i = 0; i < num_chunks; ++i)
-    buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
 
   // Should recycle the chunks in the returned order.
   for (size_t i = 0; i < num_chunks; ++i) {
@@ -2715,9 +2728,8 @@
 
   // Return all chunks in reverse order.
   for (size_t i = 0; i < num_chunks; ++i) {
-    buffer->ReturnChunk(
-        num_chunks - i - 1,
-        scoped_ptr<TraceBufferChunk>(chunks[num_chunks - i - 1]));
+    buffer->ReturnChunk(num_chunks - i - 1, std::unique_ptr<TraceBufferChunk>(
+                                                chunks[num_chunks - i - 1]));
   }
 
   // Should recycle the chunks in the returned order.
@@ -2730,7 +2742,7 @@
   }
 
   for (size_t i = 0; i < num_chunks; ++i)
-    buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
 
   TraceLog::GetInstance()->SetDisabled();
 }
@@ -2747,7 +2759,8 @@
   EXPECT_FALSE(buffer->NextChunk());
 
   size_t half_chunks = num_chunks / 2;
-  scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[half_chunks]);
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[half_chunks]);
 
   for (size_t i = 0; i < half_chunks; ++i) {
     chunks[i] = buffer->GetChunk(&chunk_index).release();
@@ -2755,7 +2768,7 @@
     EXPECT_EQ(i, chunk_index);
   }
   for (size_t i = 0; i < half_chunks; ++i)
-    buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
 
   for (size_t i = 0; i < half_chunks; ++i)
     EXPECT_EQ(chunks[i], buffer->NextChunk());
@@ -2774,7 +2787,8 @@
   EXPECT_EQ(0u, buffer->Size());
   EXPECT_FALSE(buffer->NextChunk());
 
-  scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[num_chunks]);
+  std::unique_ptr<TraceBufferChunk* []> chunks(
+      new TraceBufferChunk*[num_chunks]);
 
   for (size_t i = 0; i < num_chunks; ++i) {
     chunks[i] = buffer->GetChunk(&chunk_index).release();
@@ -2782,7 +2796,7 @@
     EXPECT_EQ(i, chunk_index);
   }
   for (size_t i = 0; i < num_chunks; ++i)
-    buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+    buffer->ReturnChunk(i, std::unique_ptr<TraceBufferChunk>(chunks[i]));
 
   for (size_t i = 0; i < num_chunks; ++i)
     EXPECT_TRUE(chunks[i] == buffer->NextChunk());
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 09d9f96..1da42bf 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -6,16 +6,17 @@
 
 #include <algorithm>
 #include <cmath>
+#include <memory>
 #include <utility>
 
 #include "base/base_switches.h"
 #include "base/bind.h"
 #include "base/command_line.h"
+#include "base/debug/leak_annotations.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted_memory.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/singleton.h"
 #include "base/process/process_metrics.h"
 #include "base/stl_util.h"
@@ -23,11 +24,13 @@
 #include "base/strings/string_tokenizer.h"
 #include "base/strings/stringprintf.h"
 #include "base/sys_info.h"
-#include "base/thread_task_runner_handle.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_id_name_manager.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/threading/worker_pool.h"
 #include "base/time/time.h"
+#include "base/trace_event/heap_profiler.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/memory_dump_provider.h"
@@ -83,7 +86,7 @@
 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
 const int kThreadFlushTimeoutMs = 3000;
 
-#define MAX_CATEGORY_GROUPS 100
+#define MAX_CATEGORY_GROUPS 105
 
 // Parallel arrays g_category_groups and g_category_group_enabled are separate
 // so that a pointer to a member of g_category_group_enabled can be easily
@@ -239,7 +242,7 @@
   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
   // as long as the thread exists.
   TraceLog* trace_log_;
-  scoped_ptr<TraceBufferChunk> chunk_;
+  std::unique_ptr<TraceBufferChunk> chunk_;
   size_t chunk_index_;
   int generation_;
 
@@ -305,9 +308,8 @@
   delete this;
 }
 
-bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(
-    const MemoryDumpArgs& /* args */,
-    ProcessMemoryDump* pmd) {
+bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs&,
+                                                    ProcessMemoryDump* pmd) {
   if (!chunk_)
     return true;
   std::string dump_base_name = StringPrintf(
@@ -331,6 +333,15 @@
   // find the generation mismatch and delete this buffer soon.
 }
 
+struct TraceLog::RegisteredAsyncObserver {
+  RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
+      : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
+  ~RegisteredAsyncObserver() {}
+
+  WeakPtr<AsyncEnabledStateObserver> observer;
+  scoped_refptr<SequencedTaskRunner> task_runner;
+};
+
 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
 
 TraceLogStatus::~TraceLogStatus() {}
@@ -364,6 +375,10 @@
   // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
   //                            sizeof(g_category_group_enabled),
   //                           "trace_event category enabled");
+  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
+    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
+                         "trace_event category enabled");
+  }
 #if defined(OS_NACL)  // NaCl shouldn't expose the process id.
   SetProcessID(0);
 #else
@@ -386,6 +401,7 @@
   // trace events will be added into the main buffer directly.
   if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
     return;
+  HEAP_PROFILER_SCOPED_IGNORE;
   auto thread_local_event_buffer = thread_local_event_buffer_.Get();
   if (thread_local_event_buffer &&
       !CheckGeneration(thread_local_event_buffer->generation())) {
@@ -398,8 +414,7 @@
   }
 }
 
-bool TraceLog::OnMemoryDump(const MemoryDumpArgs& /* args */,
-                            ProcessMemoryDump* pmd) {
+bool TraceLog::OnMemoryDump(const MemoryDumpArgs&, ProcessMemoryDump* pmd) {
   // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
   // (crbug.com/499731).
   TraceEventMemoryOverhead overhead;
@@ -536,6 +551,7 @@
     // category groups with strings not known at compile time (this is
     // required by SetWatchEvent).
     const char* new_group = strdup(category_group);
+    ANNOTATE_LEAKING_OBJECT_PTR(new_group);
     g_category_groups[category_index] = new_group;
     DCHECK(!g_category_group_enabled[category_index]);
     // Note that if both included and excluded patterns in the
@@ -562,6 +578,7 @@
 
 void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
   std::vector<EnabledStateObserver*> observer_list;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
   {
     AutoLock lock(lock_);
 
@@ -626,10 +643,16 @@
 
     dispatching_to_observer_list_ = true;
     observer_list = enabled_state_observer_list_;
+    observer_map = async_observers_;
   }
   // Notify observers outside the lock in case they trigger trace events.
   for (size_t i = 0; i < observer_list.size(); ++i)
     observer_list[i]->OnTraceLogEnabled();
+  for (const auto& it : observer_map) {
+    it.second.task_runner->PostTask(
+        FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled,
+                        it.second.observer));
+  }
 
   {
     AutoLock lock(lock_);
@@ -711,6 +734,8 @@
   dispatching_to_observer_list_ = true;
   std::vector<EnabledStateObserver*> observer_list =
       enabled_state_observer_list_;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map =
+      async_observers_;
 
   {
     // Dispatch to observers outside the lock in case the observer triggers a
@@ -718,6 +743,11 @@
     AutoUnlock unlock(lock_);
     for (size_t i = 0; i < observer_list.size(); ++i)
       observer_list[i]->OnTraceLogDisabled();
+    for (const auto& it : observer_map) {
+      it.second.task_runner->PostTask(
+          FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled,
+                          it.second.observer));
+    }
   }
   dispatching_to_observer_list_ = false;
 }
@@ -895,12 +925,13 @@
 
 // Usually it runs on a different thread.
 void TraceLog::ConvertTraceEventsToTraceFormat(
-    scoped_ptr<TraceBuffer> logged_events,
+    std::unique_ptr<TraceBuffer> logged_events,
     const OutputCallback& flush_output_callback,
     const ArgumentFilterPredicate& argument_filter_predicate) {
   if (flush_output_callback.is_null())
     return;
 
+  HEAP_PROFILER_SCOPED_IGNORE;
   // The callback need to be called at least once even if there is no events
   // to let the caller know the completion of flush.
   scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
@@ -921,7 +952,7 @@
 }
 
 void TraceLog::FinishFlush(int generation, bool discard_events) {
-  scoped_ptr<TraceBuffer> previous_logged_events;
+  std::unique_ptr<TraceBuffer> previous_logged_events;
   OutputCallback flush_output_callback;
   ArgumentFilterPredicate argument_filter_predicate;
 
@@ -1029,7 +1060,7 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
@@ -1061,7 +1092,7 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
@@ -1093,7 +1124,7 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
@@ -1127,7 +1158,7 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   return AddTraceEventWithThreadIdAndTimestamp(
       phase,
@@ -1159,7 +1190,7 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   TraceEventHandle handle = {0, 0, 0};
   if (!*category_group_enabled)
@@ -1310,14 +1341,17 @@
 
   // TODO(primiano): Add support for events with copied name crbug.com/581078
   if (!(flags & TRACE_EVENT_FLAG_COPY)) {
-    if (AllocationContextTracker::capture_enabled()) {
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
       if (phase == TRACE_EVENT_PHASE_BEGIN ||
-          phase == TRACE_EVENT_PHASE_COMPLETE)
-        AllocationContextTracker::PushPseudoStackFrame(name);
-      else if (phase == TRACE_EVENT_PHASE_END)
+          phase == TRACE_EVENT_PHASE_COMPLETE) {
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->PushPseudoStackFrame(name);
+      } else if (phase == TRACE_EVENT_PHASE_END)
         // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
         // is in |TraceLog::UpdateTraceEventDuration|.
-        AllocationContextTracker::PopPseudoStackFrame(name);
+        AllocationContextTracker::GetInstanceForCurrentThread()
+            ->PopPseudoStackFrame(name);
     }
   }
 
@@ -1331,9 +1365,10 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+    std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
-  scoped_ptr<TraceEvent> trace_event(new TraceEvent);
+  HEAP_PROFILER_SCOPED_IGNORE;
+  std::unique_ptr<TraceEvent> trace_event(new TraceEvent);
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   ThreadTicks thread_now = ThreadNow();
   TimeTicks now = OffsetNow();
@@ -1353,6 +1388,7 @@
 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                             const TimeTicks& timestamp,
                                             TraceEvent* trace_event) {
+  HEAP_PROFILER_SCOPED_IGNORE;
   AutoLock thread_info_lock(thread_info_lock_);
 
   // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
@@ -1439,9 +1475,11 @@
           EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
     }
 
-    if (base::trace_event::AllocationContextTracker::capture_enabled()) {
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
       // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
-      base::trace_event::AllocationContextTracker::PopPseudoStackFrame(name);
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopPseudoStackFrame(name);
     }
   }
 
@@ -1655,6 +1693,7 @@
 }
 
 TraceBuffer* TraceLog::CreateTraceBuffer() {
+  HEAP_PROFILER_SCOPED_IGNORE;
   InternalTraceOptions options = trace_options();
   if (options & kInternalRecordContinuously)
     return TraceBuffer::CreateTraceBufferRingBuffer(
@@ -1693,6 +1732,25 @@
   overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
 }
 
+void TraceLog::AddAsyncEnabledStateObserver(
+    WeakPtr<AsyncEnabledStateObserver> listener) {
+  AutoLock lock(lock_);
+  async_observers_.insert(
+      std::make_pair(listener.get(), RegisteredAsyncObserver(listener)));
+}
+
+void TraceLog::RemoveAsyncEnabledStateObserver(
+    AsyncEnabledStateObserver* listener) {
+  AutoLock lock(lock_);
+  async_observers_.erase(listener);
+}
+
+bool TraceLog::HasAsyncEnabledStateObserver(
+    AsyncEnabledStateObserver* listener) const {
+  AutoLock lock(lock_);
+  return ContainsKey(async_observers_, listener);
+}
+
 }  // namespace trace_event
 }  // namespace base
 
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
index 67477c4..e4407e8 100644
--- a/base/trace_event/trace_log.h
+++ b/base/trace_event/trace_log.h
@@ -8,6 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -15,7 +16,6 @@
 #include "base/containers/hash_tables.h"
 #include "base/gtest_prod_util.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/memory/scoped_vector.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/trace_config.h"
@@ -119,6 +119,28 @@
   void RemoveEnabledStateObserver(EnabledStateObserver* listener);
   bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
 
+  // Asynchronous enabled state listeners. When tracing is enabled or disabled,
+  // for each observer, a task for invoking its appropriate callback is posted
+  // to the thread from which AddAsyncEnabledStateObserver() was called. This
+  // allows the observer to be safely destroyed, provided that it happens on the
+  // same thread that invoked AddAsyncEnabledStateObserver().
+  class BASE_EXPORT AsyncEnabledStateObserver {
+   public:
+    virtual ~AsyncEnabledStateObserver() = default;
+
+    // Posted just after the tracing system becomes enabled, outside |lock_|.
+    // TraceLog::IsEnabled() is true at this point.
+    virtual void OnTraceLogEnabled() = 0;
+
+    // Posted just after the tracing system becomes disabled, outside |lock_|.
+    // TraceLog::IsEnabled() is false at this point.
+    virtual void OnTraceLogDisabled() = 0;
+  };
+  void AddAsyncEnabledStateObserver(
+      WeakPtr<AsyncEnabledStateObserver> listener);
+  void RemoveAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener);
+  bool HasAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener) const;
+
   TraceLogStatus GetStatus() const;
   bool BufferIsFull() const;
 
@@ -191,7 +213,7 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithBindId(
       char phase,
@@ -204,7 +226,7 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithProcessId(
       char phase,
@@ -217,7 +239,7 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
       char phase,
@@ -231,7 +253,7 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
       char phase,
@@ -246,7 +268,7 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
 
   // Adds a metadata event that will be written when the trace log is flushed.
@@ -257,7 +279,7 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+      std::unique_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
 
   void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
@@ -365,6 +387,7 @@
 
   class ThreadLocalEventBuffer;
   class OptionalAutoLock;
+  struct RegisteredAsyncObserver;
 
   TraceLog();
   ~TraceLog() override;
@@ -400,7 +423,7 @@
   void FlushCurrentThread(int generation, bool discard_events);
   // Usually it runs on a different thread.
   static void ConvertTraceEventsToTraceFormat(
-      scoped_ptr<TraceBuffer> logged_events,
+      std::unique_ptr<TraceBuffer> logged_events,
       const TraceLog::OutputCallback& flush_output_callback,
       const ArgumentFilterPredicate& argument_filter_predicate);
   void FinishFlush(int generation, bool discard_events);
@@ -437,11 +460,13 @@
   Lock thread_info_lock_;
   Mode mode_;
   int num_traces_recorded_;
-  scoped_ptr<TraceBuffer> logged_events_;
-  std::vector<scoped_ptr<TraceEvent>> metadata_events_;
+  std::unique_ptr<TraceBuffer> logged_events_;
+  std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
   subtle::AtomicWord /* EventCallback */ event_callback_;
   bool dispatching_to_observer_list_;
   std::vector<EnabledStateObserver*> enabled_state_observer_list_;
+  std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
+      async_observers_;
 
   std::string process_name_;
   base::hash_map<int, std::string> process_labels_;
@@ -470,7 +495,7 @@
   subtle::AtomicWord /* Options */ trace_options_;
 
   // Sampling thread handles.
-  scoped_ptr<TraceSamplingThread> sampling_thread_;
+  std::unique_ptr<TraceSamplingThread> sampling_thread_;
   PlatformThreadHandle sampling_thread_handle_;
 
   TraceConfig trace_config_;
@@ -487,7 +512,7 @@
 
   // For events which can't be added into the thread local buffer, e.g. events
   // from threads without a message loop.
-  scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
+  std::unique_ptr<TraceBufferChunk> thread_shared_chunk_;
   size_t thread_shared_chunk_index_;
 
   // Set when asynchronous Flush is in progress.
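
Illustrative aside, not part of the patch: a minimal sketch of how a client might use the asynchronous enabled-state observer API added to TraceLog above. The class name MyTraceObserver and its members are hypothetical; only AddAsyncEnabledStateObserver(), RemoveAsyncEnabledStateObserver(), OnTraceLogEnabled() and OnTraceLogDisabled() come from the patch.

#include "base/memory/weak_ptr.h"
#include "base/trace_event/trace_log.h"

// Hypothetical client. It must be registered on a thread with a
// ThreadTaskRunnerHandle, because the callbacks are posted back to the
// thread that called AddAsyncEnabledStateObserver().
class MyTraceObserver
    : public base::trace_event::TraceLog::AsyncEnabledStateObserver {
 public:
  MyTraceObserver() : weak_factory_(this) {
    base::trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
        weak_factory_.GetWeakPtr());
  }
  ~MyTraceObserver() override {
    base::trace_event::TraceLog::GetInstance()
        ->RemoveAsyncEnabledStateObserver(this);
  }

  // Both callbacks arrive as posted tasks, outside the TraceLog lock.
  void OnTraceLogEnabled() override { /* start emitting trace events */ }
  void OnTraceLogDisabled() override { /* stop emitting trace events */ }

 private:
  base::WeakPtrFactory<MyTraceObserver> weak_factory_;
};
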
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 5ae1dd0..7a88079 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -11,12 +11,13 @@
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/compiler_specific.h"
+#include "base/debug/leak_annotations.h"
 #include "base/logging.h"
 #include "base/process/process_handle.h"
 #include "base/strings/stringprintf.h"
+#include "base/third_party/valgrind/memcheck.h"
 #include "base/tracking_info.h"
 #include "build/build_config.h"
-#include "third_party/valgrind/memcheck.h"
 
 using base::TimeDelta;
 
@@ -740,8 +741,7 @@
 }
 
 // static
-void ThreadData::EnsureCleanupWasCalled(
-    int /* major_threads_shutdown_count */) {
+void ThreadData::EnsureCleanupWasCalled(int /*major_threads_shutdown_count*/) {
   base::AutoLock lock(*list_lock_.Pointer());
   if (worker_thread_data_creation_count_ == 0)
     return;  // We haven't really run much, and couldn't have leaked.
@@ -790,6 +790,7 @@
   if (leak) {
     ThreadData* thread_data = thread_data_list;
     while (thread_data) {
+      ANNOTATE_LEAKING_OBJECT_PTR(thread_data);
       thread_data = thread_data->next();
     }
     return;
diff --git a/base/tracked_objects_unittest.cc b/base/tracked_objects_unittest.cc
index be86cbb..70d9601 100644
--- a/base/tracked_objects_unittest.cc
+++ b/base/tracked_objects_unittest.cc
@@ -9,7 +9,8 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "base/process/process_handle.h"
 #include "base/time/time.h"
 #include "base/tracking_info.h"
@@ -239,7 +240,7 @@
 TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
   ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
 
-  scoped_ptr<DeathData> data(new DeathData());
+  std::unique_ptr<DeathData> data(new DeathData());
   ASSERT_NE(data, nullptr);
   EXPECT_EQ(data->run_duration_sum(), 0);
   EXPECT_EQ(data->run_duration_max(), 0);
@@ -278,7 +279,7 @@
 TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
   ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
 
-  scoped_ptr<DeathData> data(new DeathData());
+  std::unique_ptr<DeathData> data(new DeathData());
   ASSERT_NE(data, nullptr);
 
   int32_t run_ms = 42;
diff --git a/base/tuple.h b/base/tuple.h
index 78dfd75..df69bf0 100644
--- a/base/tuple.h
+++ b/base/tuple.h
@@ -94,7 +94,7 @@
   using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11,12>;
 };
 
-#else  // defined(WIN) && defined(_PREFAST_)
+#else  // defined(OS_WIN) && defined(_PREFAST_)
 
 template <size_t... Ns>
 struct MakeIndexSequenceImpl<0, Ns...> {
@@ -105,7 +105,7 @@
 struct MakeIndexSequenceImpl<N, Ns...>
     : MakeIndexSequenceImpl<N - 1, N - 1, Ns...> {};
 
-#endif  // defined(WIN) && defined(_PREFAST_)
+#endif  // defined(OS_WIN) && defined(_PREFAST_)
 
 template <size_t N>
 using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
diff --git a/base/values.cc b/base/values.cc
index 80cc10c..5f6eaae 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -13,6 +13,7 @@
 
 #include "base/json/json_writer.h"
 #include "base/logging.h"
+#include "base/memory/ptr_util.h"
 #include "base/move.h"
 #include "base/strings/string_util.h"
 #include "base/strings/utf_string_conversions.h"
@@ -21,15 +22,15 @@
 
 namespace {
 
-scoped_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
+std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
 
 // Make a deep copy of |node|, but don't include empty lists or dictionaries
 // in the copy. It's possible for this function to return NULL and it
 // expects |node| to always be non-NULL.
-scoped_ptr<ListValue> CopyListWithoutEmptyChildren(const ListValue& list) {
-  scoped_ptr<ListValue> copy;
+std::unique_ptr<ListValue> CopyListWithoutEmptyChildren(const ListValue& list) {
+  std::unique_ptr<ListValue> copy;
   for (ListValue::const_iterator it = list.begin(); it != list.end(); ++it) {
-    scoped_ptr<Value> child_copy = CopyWithoutEmptyChildren(**it);
+    std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(**it);
     if (child_copy) {
       if (!copy)
         copy.reset(new ListValue);
@@ -39,11 +40,11 @@
   return copy;
 }
 
-scoped_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
+std::unique_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
     const DictionaryValue& dict) {
-  scoped_ptr<DictionaryValue> copy;
+  std::unique_ptr<DictionaryValue> copy;
   for (DictionaryValue::Iterator it(dict); !it.IsAtEnd(); it.Advance()) {
-    scoped_ptr<Value> child_copy = CopyWithoutEmptyChildren(it.value());
+    std::unique_ptr<Value> child_copy = CopyWithoutEmptyChildren(it.value());
     if (child_copy) {
       if (!copy)
         copy.reset(new DictionaryValue);
@@ -53,7 +54,7 @@
   return copy;
 }
 
-scoped_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
+std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
   switch (node.GetType()) {
     case Value::TYPE_LIST:
       return CopyListWithoutEmptyChildren(static_cast<const ListValue&>(node));
@@ -89,51 +90,51 @@
 }
 
 // static
-scoped_ptr<Value> Value::CreateNullValue() {
-  return make_scoped_ptr(new Value(TYPE_NULL));
+std::unique_ptr<Value> Value::CreateNullValue() {
+  return WrapUnique(new Value(TYPE_NULL));
 }
 
-bool Value::GetAsBinary(const BinaryValue** /* out_value */) const {
+bool Value::GetAsBinary(const BinaryValue**) const {
   return false;
 }
 
-bool Value::GetAsBoolean(bool* /* out_value */) const {
+bool Value::GetAsBoolean(bool*) const {
   return false;
 }
 
-bool Value::GetAsInteger(int* /* out_value */) const {
+bool Value::GetAsInteger(int*) const {
   return false;
 }
 
-bool Value::GetAsDouble(double* /* out_value */) const {
+bool Value::GetAsDouble(double*) const {
   return false;
 }
 
-bool Value::GetAsString(std::string* /* out_value */) const {
+bool Value::GetAsString(std::string*) const {
   return false;
 }
 
-bool Value::GetAsString(string16* /* out_value */) const {
+bool Value::GetAsString(string16*) const {
   return false;
 }
 
-bool Value::GetAsString(const StringValue** /* out_value */) const {
+bool Value::GetAsString(const StringValue**) const {
   return false;
 }
 
-bool Value::GetAsList(ListValue** /* out_value */) {
+bool Value::GetAsList(ListValue**) {
   return false;
 }
 
-bool Value::GetAsList(const ListValue** /* out_value */) const {
+bool Value::GetAsList(const ListValue**) const {
   return false;
 }
 
-bool Value::GetAsDictionary(DictionaryValue** /* out_value */) {
+bool Value::GetAsDictionary(DictionaryValue**) {
   return false;
 }
 
-bool Value::GetAsDictionary(const DictionaryValue** /* out_value */) const {
+bool Value::GetAsDictionary(const DictionaryValue**) const {
   return false;
 }
 
@@ -144,8 +145,8 @@
   return CreateNullValue().release();
 }
 
-scoped_ptr<Value> Value::CreateDeepCopy() const {
-  return make_scoped_ptr(DeepCopy());
+std::unique_ptr<Value> Value::CreateDeepCopy() const {
+  return WrapUnique(DeepCopy());
 }
 
 bool Value::Equals(const Value* other) const {
@@ -313,7 +314,7 @@
       size_(0) {
 }
 
-BinaryValue::BinaryValue(scoped_ptr<char[]> buffer, size_t size)
+BinaryValue::BinaryValue(std::unique_ptr<char[]> buffer, size_t size)
     : Value(TYPE_BINARY), buffer_(std::move(buffer)), size_(size) {}
 
 BinaryValue::~BinaryValue() {
@@ -324,7 +325,7 @@
                                                  size_t size) {
   char* buffer_copy = new char[size];
   memcpy(buffer_copy, buffer, size);
-  scoped_ptr<char[]> scoped_buffer_copy(buffer_copy);
+  std::unique_ptr<char[]> scoped_buffer_copy(buffer_copy);
   return new BinaryValue(std::move(scoped_buffer_copy), size);
 }
 
@@ -350,11 +351,12 @@
 ///////////////////// DictionaryValue ////////////////////
 
 // static
-scoped_ptr<DictionaryValue> DictionaryValue::From(scoped_ptr<Value> value) {
+std::unique_ptr<DictionaryValue> DictionaryValue::From(
+    std::unique_ptr<Value> value) {
   DictionaryValue* out;
   if (value && value->GetAsDictionary(&out)) {
     ignore_result(value.release());
-    return make_scoped_ptr(out);
+    return WrapUnique(out);
   }
   return nullptr;
 }
@@ -396,7 +398,8 @@
   dictionary_.clear();
 }
 
-void DictionaryValue::Set(const std::string& path, scoped_ptr<Value> in_value) {
+void DictionaryValue::Set(const std::string& path,
+                          std::unique_ptr<Value> in_value) {
   DCHECK(IsStringUTF8(path));
   DCHECK(in_value);
 
@@ -422,7 +425,7 @@
 }
 
 void DictionaryValue::Set(const std::string& path, Value* in_value) {
-  Set(path, make_scoped_ptr(in_value));
+  Set(path, WrapUnique(in_value));
 }
 
 void DictionaryValue::SetBoolean(const std::string& path, bool in_value) {
@@ -448,7 +451,7 @@
 }
 
 void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
-                                              scoped_ptr<Value> in_value) {
+                                              std::unique_ptr<Value> in_value) {
   Value* bare_ptr = in_value.release();
   // If there's an existing value here, we need to delete it, because
   // we own all our children.
@@ -463,7 +466,7 @@
 
 void DictionaryValue::SetWithoutPathExpansion(const std::string& key,
                                               Value* in_value) {
-  SetWithoutPathExpansion(key, make_scoped_ptr(in_value));
+  SetWithoutPathExpansion(key, WrapUnique(in_value));
 }
 
 void DictionaryValue::SetBooleanWithoutPathExpansion(
@@ -752,7 +755,7 @@
 }
 
 bool DictionaryValue::Remove(const std::string& path,
-                             scoped_ptr<Value>* out_value) {
+                             std::unique_ptr<Value>* out_value) {
   DCHECK(IsStringUTF8(path));
   std::string current_path(path);
   DictionaryValue* current_dictionary = this;
@@ -768,8 +771,9 @@
                                                         out_value);
 }
 
-bool DictionaryValue::RemoveWithoutPathExpansion(const std::string& key,
-                                                 scoped_ptr<Value>* out_value) {
+bool DictionaryValue::RemoveWithoutPathExpansion(
+    const std::string& key,
+    std::unique_ptr<Value>* out_value) {
   DCHECK(IsStringUTF8(key));
   ValueMap::iterator entry_iterator = dictionary_.find(key);
   if (entry_iterator == dictionary_.end())
@@ -785,7 +789,7 @@
 }
 
 bool DictionaryValue::RemovePath(const std::string& path,
-                                 scoped_ptr<Value>* out_value) {
+                                 std::unique_ptr<Value>* out_value) {
   bool result = false;
   size_t delimiter_position = path.find('.');
 
@@ -804,9 +808,10 @@
   return result;
 }
 
-scoped_ptr<DictionaryValue> DictionaryValue::DeepCopyWithoutEmptyChildren()
+std::unique_ptr<DictionaryValue> DictionaryValue::DeepCopyWithoutEmptyChildren()
     const {
-  scoped_ptr<DictionaryValue> copy = CopyDictionaryWithoutEmptyChildren(*this);
+  std::unique_ptr<DictionaryValue> copy =
+      CopyDictionaryWithoutEmptyChildren(*this);
   if (!copy)
     copy.reset(new DictionaryValue);
   return copy;
@@ -853,8 +858,8 @@
   return result;
 }
 
-scoped_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
-  return make_scoped_ptr(DeepCopy());
+std::unique_ptr<DictionaryValue> DictionaryValue::CreateDeepCopy() const {
+  return WrapUnique(DeepCopy());
 }
 
 bool DictionaryValue::Equals(const Value* other) const {
@@ -882,11 +887,11 @@
 ///////////////////// ListValue ////////////////////
 
 // static
-scoped_ptr<ListValue> ListValue::From(scoped_ptr<Value> value) {
+std::unique_ptr<ListValue> ListValue::From(std::unique_ptr<Value> value) {
   ListValue* out;
   if (value && value->GetAsList(&out)) {
     ignore_result(value.release());
-    return make_scoped_ptr(out);
+    return WrapUnique(out);
   }
   return nullptr;
 }
@@ -921,7 +926,7 @@
   return true;
 }
 
-bool ListValue::Set(size_t index, scoped_ptr<Value> in_value) {
+bool ListValue::Set(size_t index, std::unique_ptr<Value> in_value) {
   return Set(index, in_value.release());
 }
 
@@ -1036,7 +1041,7 @@
       const_cast<const ListValue**>(out_value));
 }
 
-bool ListValue::Remove(size_t index, scoped_ptr<Value>* out_value) {
+bool ListValue::Remove(size_t index, std::unique_ptr<Value>* out_value) {
   if (index >= list_.size())
     return false;
 
@@ -1065,7 +1070,7 @@
 }
 
 ListValue::iterator ListValue::Erase(iterator iter,
-                                     scoped_ptr<Value>* out_value) {
+                                     std::unique_ptr<Value>* out_value) {
   if (out_value)
     out_value->reset(*iter);
   else
@@ -1074,7 +1079,7 @@
   return list_.erase(iter);
 }
 
-void ListValue::Append(scoped_ptr<Value> in_value) {
+void ListValue::Append(std::unique_ptr<Value> in_value) {
   Append(in_value.release());
 }
 
@@ -1167,8 +1172,8 @@
   return result;
 }
 
-scoped_ptr<ListValue> ListValue::CreateDeepCopy() const {
-  return make_scoped_ptr(DeepCopy());
+std::unique_ptr<ListValue> ListValue::CreateDeepCopy() const {
+  return WrapUnique(DeepCopy());
 }
 
 bool ListValue::Equals(const Value* other) const {
diff --git a/base/values.h b/base/values.h
index 141ea93..e2506cc 100644
--- a/base/values.h
+++ b/base/values.h
@@ -22,6 +22,7 @@
 
 #include <iosfwd>
 #include <map>
+#include <memory>
 #include <string>
 #include <utility>
 #include <vector>
@@ -29,7 +30,6 @@
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string16.h"
 #include "base/strings/string_piece.h"
 
@@ -66,7 +66,7 @@
 
   virtual ~Value();
 
-  static scoped_ptr<Value> CreateNullValue();
+  static std::unique_ptr<Value> CreateNullValue();
 
   // Returns the type of the value stored by the current Value object.
   // Each type will be implemented by only one subclass of Value, so it's
@@ -102,7 +102,7 @@
   // this works because C++ supports covariant return types.
   virtual Value* DeepCopy() const;
   // Preferred version of DeepCopy. TODO(estade): remove the above.
-  scoped_ptr<Value> CreateDeepCopy() const;
+  std::unique_ptr<Value> CreateDeepCopy() const;
 
   // Compares if two Value objects have equal contents.
   virtual bool Equals(const Value* other) const;
@@ -178,7 +178,7 @@
 
   // Creates a BinaryValue, taking ownership of the bytes pointed to by
   // |buffer|.
-  BinaryValue(scoped_ptr<char[]> buffer, size_t size);
+  BinaryValue(std::unique_ptr<char[]> buffer, size_t size);
 
   ~BinaryValue() override;
 
@@ -199,7 +199,7 @@
   bool Equals(const Value* other) const override;
 
  private:
-  scoped_ptr<char[]> buffer_;
+  std::unique_ptr<char[]> buffer_;
   size_t size_;
 
   DISALLOW_COPY_AND_ASSIGN(BinaryValue);
@@ -211,7 +211,7 @@
 class BASE_EXPORT DictionaryValue : public Value {
  public:
   // Returns |value| if it is a dictionary, nullptr otherwise.
-  static scoped_ptr<DictionaryValue> From(scoped_ptr<Value> value);
+  static std::unique_ptr<DictionaryValue> From(std::unique_ptr<Value> value);
 
   DictionaryValue();
   ~DictionaryValue() override;
@@ -239,7 +239,7 @@
   // If the key at any step of the way doesn't exist, or exists but isn't
   // a DictionaryValue, a new DictionaryValue will be created and attached
   // to the path in that location. |in_value| must be non-null.
-  void Set(const std::string& path, scoped_ptr<Value> in_value);
+  void Set(const std::string& path, std::unique_ptr<Value> in_value);
   // Deprecated version of the above. TODO(estade): remove.
   void Set(const std::string& path, Value* in_value);
 
@@ -254,7 +254,7 @@
   // Like Set(), but without special treatment of '.'.  This allows e.g. URLs to
   // be used as paths.
   void SetWithoutPathExpansion(const std::string& key,
-                               scoped_ptr<Value> in_value);
+                               std::unique_ptr<Value> in_value);
   // Deprecated version of the above. TODO(estade): remove.
   void SetWithoutPathExpansion(const std::string& key, Value* in_value);
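
Illustrative aside, not part of the patch: a quick sketch of the DictionaryValue setters and Remove() after the migration to std::unique_ptr<Value>. The function name BuildDict is hypothetical; the calls themselves are the ones declared in this header and in base/memory/ptr_util.h.

#include <memory>
#include <utility>

#include "base/memory/ptr_util.h"
#include "base/values.h"

void BuildDict() {
  base::DictionaryValue dict;
  // Set() expands "a.b" into a nested dictionary: {"a": {"b": 42}}.
  dict.Set("a.b", base::WrapUnique(new base::FundamentalValue(42)));
  // SetWithoutPathExpansion() keeps the literal key "a.b".
  dict.SetWithoutPathExpansion(
      "a.b", base::WrapUnique(new base::FundamentalValue(1)));

  // Remove() hands ownership of the removed value back via std::unique_ptr.
  std::unique_ptr<base::Value> removed;
  dict.Remove("a.b", &removed);  // Removes the nested "b" under "a".
}
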
 
@@ -329,21 +329,22 @@
   // |out_value|.  If |out_value| is NULL, the removed value will be deleted.
   // This method returns true if |path| is a valid path; otherwise it will
   // return false and the DictionaryValue object will be unchanged.
-  virtual bool Remove(const std::string& path, scoped_ptr<Value>* out_value);
+  virtual bool Remove(const std::string& path,
+                      std::unique_ptr<Value>* out_value);
 
   // Like Remove(), but without special treatment of '.'.  This allows e.g. URLs
   // to be used as paths.
   virtual bool RemoveWithoutPathExpansion(const std::string& key,
-                                          scoped_ptr<Value>* out_value);
+                                          std::unique_ptr<Value>* out_value);
 
   // Removes a path, clearing out all dictionaries on |path| that remain empty
   // after removing the value at |path|.
   virtual bool RemovePath(const std::string& path,
-                          scoped_ptr<Value>* out_value);
+                          std::unique_ptr<Value>* out_value);
 
   // Makes a copy of |this| but doesn't include empty dictionaries and lists in
   // the copy.  This never returns NULL, even if |this| itself is empty.
-  scoped_ptr<DictionaryValue> DeepCopyWithoutEmptyChildren() const;
+  std::unique_ptr<DictionaryValue> DeepCopyWithoutEmptyChildren() const;
 
   // Merge |dictionary| into this dictionary. This is done recursively, i.e. any
   // sub-dictionaries will be merged as well. In case of key collisions, the
@@ -377,7 +378,7 @@
   // Overridden from Value:
   DictionaryValue* DeepCopy() const override;
   // Preferred version of DeepCopy. TODO(estade): remove the above.
-  scoped_ptr<DictionaryValue> CreateDeepCopy() const;
+  std::unique_ptr<DictionaryValue> CreateDeepCopy() const;
   bool Equals(const Value* other) const override;
 
  private:
@@ -393,7 +394,7 @@
   typedef ValueVector::const_iterator const_iterator;
 
   // Returns |value| if it is a list, nullptr otherwise.
-  static scoped_ptr<ListValue> From(scoped_ptr<Value> value);
+  static std::unique_ptr<ListValue> From(std::unique_ptr<Value> value);
 
   ListValue();
   ~ListValue() override;
@@ -414,7 +415,7 @@
   // the value is a null pointer.
   bool Set(size_t index, Value* in_value);
   // Preferred version of the above. TODO(estade): remove the above.
-  bool Set(size_t index, scoped_ptr<Value> in_value);
+  bool Set(size_t index, std::unique_ptr<Value> in_value);
 
   // Gets the Value at the given index.  Modifies |out_value| (and returns true)
   // only if the index falls within the current list range.
@@ -446,7 +447,7 @@
   // passed out via |out_value|.  If |out_value| is NULL, the removed value will
   // be deleted.  This method returns true if |index| is valid; otherwise
   // it will return false and the ListValue object will be unchanged.
-  virtual bool Remove(size_t index, scoped_ptr<Value>* out_value);
+  virtual bool Remove(size_t index, std::unique_ptr<Value>* out_value);
 
   // Removes the first instance of |value| found in the list, if any, and
   // deletes it. |index| is the location where |value| was found. Returns false
@@ -457,10 +458,10 @@
   // deleted, otherwise ownership of the value is passed back to the caller.
   // Returns an iterator pointing to the location of the element that
   // followed the erased element.
-  iterator Erase(iterator iter, scoped_ptr<Value>* out_value);
+  iterator Erase(iterator iter, std::unique_ptr<Value>* out_value);
 
   // Appends a Value to the end of the list.
-  void Append(scoped_ptr<Value> in_value);
+  void Append(std::unique_ptr<Value> in_value);
   // Deprecated version of the above. TODO(estade): remove.
   void Append(Value* in_value);
 
@@ -504,7 +505,7 @@
   bool Equals(const Value* other) const override;
 
   // Preferred version of DeepCopy. TODO(estade): remove DeepCopy.
-  scoped_ptr<ListValue> CreateDeepCopy() const;
+  std::unique_ptr<ListValue> CreateDeepCopy() const;
 
  private:
   ValueVector list_;
@@ -533,8 +534,8 @@
   // error_code will be set with the underlying error.
   // If |error_message| is non-null, it will be filled in with a formatted
   // error message including the location of the error if appropriate.
-  virtual scoped_ptr<Value> Deserialize(int* error_code,
-                                        std::string* error_str) = 0;
+  virtual std::unique_ptr<Value> Deserialize(int* error_code,
+                                             std::string* error_str) = 0;
 };
 
 // Stream operator so Values can be used in assertion statements.  In order that
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 175a0d0..ac78830 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -2,15 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "base/values.h"
+
 #include <stddef.h>
 
 #include <limits>
+#include <memory>
 #include <utility>
 
-#include "base/memory/scoped_ptr.h"
+#include "base/memory/ptr_util.h"
 #include "base/strings/string16.h"
 #include "base/strings/utf_string_conversions.h"
-#include "base/values.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -36,11 +38,11 @@
   ASSERT_FALSE(
     settings.GetList("global.toolbar.bookmarks", &toolbar_bookmarks));
 
-  scoped_ptr<ListValue> new_toolbar_bookmarks(new ListValue);
+  std::unique_ptr<ListValue> new_toolbar_bookmarks(new ListValue);
   settings.Set("global.toolbar.bookmarks", std::move(new_toolbar_bookmarks));
   ASSERT_TRUE(settings.GetList("global.toolbar.bookmarks", &toolbar_bookmarks));
 
-  scoped_ptr<DictionaryValue> new_bookmark(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> new_bookmark(new DictionaryValue);
   new_bookmark->SetString("name", "Froogle");
   new_bookmark->SetString("url", "http://froogle.com");
   toolbar_bookmarks->Append(std::move(new_bookmark));
@@ -59,11 +61,11 @@
 }
 
 TEST(ValuesTest, List) {
-  scoped_ptr<ListValue> mixed_list(new ListValue());
-  mixed_list->Set(0, make_scoped_ptr(new FundamentalValue(true)));
-  mixed_list->Set(1, make_scoped_ptr(new FundamentalValue(42)));
-  mixed_list->Set(2, make_scoped_ptr(new FundamentalValue(88.8)));
-  mixed_list->Set(3, make_scoped_ptr(new StringValue("foo")));
+  std::unique_ptr<ListValue> mixed_list(new ListValue());
+  mixed_list->Set(0, WrapUnique(new FundamentalValue(true)));
+  mixed_list->Set(1, WrapUnique(new FundamentalValue(42)));
+  mixed_list->Set(2, WrapUnique(new FundamentalValue(88.8)));
+  mixed_list->Set(3, WrapUnique(new StringValue("foo")));
   ASSERT_EQ(4u, mixed_list->GetSize());
 
   Value *value = NULL;
@@ -109,13 +111,13 @@
 
 TEST(ValuesTest, BinaryValue) {
   // Default constructor creates a BinaryValue with a null buffer and size 0.
-  scoped_ptr<BinaryValue> binary(new BinaryValue());
+  std::unique_ptr<BinaryValue> binary(new BinaryValue());
   ASSERT_TRUE(binary.get());
   ASSERT_EQ(NULL, binary->GetBuffer());
   ASSERT_EQ(0U, binary->GetSize());
 
   // Test the common case of a non-empty buffer
-  scoped_ptr<char[]> buffer(new char[15]);
+  std::unique_ptr<char[]> buffer(new char[15]);
   char* original_buffer = buffer.get();
   binary.reset(new BinaryValue(std::move(buffer), 15));
   ASSERT_TRUE(binary.get());
@@ -141,10 +143,10 @@
 
 TEST(ValuesTest, StringValue) {
   // Test overloaded StringValue constructor.
-  scoped_ptr<Value> narrow_value(new StringValue("narrow"));
+  std::unique_ptr<Value> narrow_value(new StringValue("narrow"));
   ASSERT_TRUE(narrow_value.get());
   ASSERT_TRUE(narrow_value->IsType(Value::TYPE_STRING));
-  scoped_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
+  std::unique_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
   ASSERT_TRUE(utf16_value.get());
   ASSERT_TRUE(utf16_value->IsType(Value::TYPE_STRING));
 
@@ -198,14 +200,14 @@
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
   }
   EXPECT_TRUE(deletion_flag);
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     list.Clear();
     EXPECT_TRUE(deletion_flag);
@@ -213,7 +215,7 @@
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(list.Set(0, Value::CreateNullValue()));
     EXPECT_TRUE(deletion_flag);
@@ -222,18 +224,18 @@
 
 TEST(ValuesTest, ListRemoval) {
   bool deletion_flag = true;
-  scoped_ptr<Value> removed_item;
+  std::unique_ptr<Value> removed_item;
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_EQ(1U, list.GetSize());
     EXPECT_FALSE(list.Remove(std::numeric_limits<size_t>::max(),
                              &removed_item));
     EXPECT_FALSE(list.Remove(1, &removed_item));
     EXPECT_TRUE(list.Remove(0, &removed_item));
-    ASSERT_TRUE(removed_item.get());
+    ASSERT_TRUE(removed_item);
     EXPECT_EQ(0U, list.GetSize());
   }
   EXPECT_FALSE(deletion_flag);
@@ -242,7 +244,7 @@
 
   {
     ListValue list;
-    list.Append(make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    list.Append(WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(list.Remove(0, NULL));
     EXPECT_TRUE(deletion_flag);
@@ -251,7 +253,8 @@
 
   {
     ListValue list;
-    scoped_ptr<DeletionTestValue> value(new DeletionTestValue(&deletion_flag));
+    std::unique_ptr<DeletionTestValue> value(
+        new DeletionTestValue(&deletion_flag));
     DeletionTestValue* original_value = value.get();
     list.Append(std::move(value));
     EXPECT_FALSE(deletion_flag);
@@ -269,14 +272,14 @@
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
   }
   EXPECT_TRUE(deletion_flag);
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     dict.Clear();
     EXPECT_TRUE(deletion_flag);
@@ -284,7 +287,7 @@
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     dict.Set(key, Value::CreateNullValue());
     EXPECT_TRUE(deletion_flag);
@@ -294,17 +297,17 @@
 TEST(ValuesTest, DictionaryRemoval) {
   std::string key = "test";
   bool deletion_flag = true;
-  scoped_ptr<Value> removed_item;
+  std::unique_ptr<Value> removed_item;
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(dict.HasKey(key));
     EXPECT_FALSE(dict.Remove("absent key", &removed_item));
     EXPECT_TRUE(dict.Remove(key, &removed_item));
     EXPECT_FALSE(dict.HasKey(key));
-    ASSERT_TRUE(removed_item.get());
+    ASSERT_TRUE(removed_item);
   }
   EXPECT_FALSE(deletion_flag);
   removed_item.reset();
@@ -312,7 +315,7 @@
 
   {
     DictionaryValue dict;
-    dict.Set(key, make_scoped_ptr(new DeletionTestValue(&deletion_flag)));
+    dict.Set(key, WrapUnique(new DeletionTestValue(&deletion_flag)));
     EXPECT_FALSE(deletion_flag);
     EXPECT_TRUE(dict.HasKey(key));
     EXPECT_TRUE(dict.Remove(key, NULL));
@@ -372,9 +375,9 @@
   dict.SetInteger("a.long.way.down", 1);
   dict.SetBoolean("a.long.key.path", true);
 
-  scoped_ptr<Value> removed_item;
+  std::unique_ptr<Value> removed_item;
   EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
-  ASSERT_TRUE(removed_item.get());
+  ASSERT_TRUE(removed_item);
   EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_INTEGER));
   EXPECT_FALSE(dict.HasKey("a.long.way.down"));
   EXPECT_FALSE(dict.HasKey("a.long.way"));
@@ -387,56 +390,59 @@
 
   removed_item.reset();
   EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
-  ASSERT_TRUE(removed_item.get());
+  ASSERT_TRUE(removed_item);
   EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_BOOLEAN));
   EXPECT_TRUE(dict.empty());
 }
 
 TEST(ValuesTest, DeepCopy) {
   DictionaryValue original_dict;
-  scoped_ptr<Value> scoped_null = Value::CreateNullValue();
+  std::unique_ptr<Value> scoped_null = Value::CreateNullValue();
   Value* original_null = scoped_null.get();
   original_dict.Set("null", std::move(scoped_null));
-  scoped_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
+  std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
   FundamentalValue* original_bool = scoped_bool.get();
   original_dict.Set("bool", std::move(scoped_bool));
-  scoped_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
+  std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
   FundamentalValue* original_int = scoped_int.get();
   original_dict.Set("int", std::move(scoped_int));
-  scoped_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
+  std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
   FundamentalValue* original_double = scoped_double.get();
   original_dict.Set("double", std::move(scoped_double));
-  scoped_ptr<StringValue> scoped_string(new StringValue("hello"));
+  std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
   StringValue* original_string = scoped_string.get();
   original_dict.Set("string", std::move(scoped_string));
-  scoped_ptr<StringValue> scoped_string16(
+  std::unique_ptr<StringValue> scoped_string16(
       new StringValue(ASCIIToUTF16("hello16")));
   StringValue* original_string16 = scoped_string16.get();
   original_dict.Set("string16", std::move(scoped_string16));
 
-  scoped_ptr<char[]> original_buffer(new char[42]);
+  std::unique_ptr<char[]> original_buffer(new char[42]);
   memset(original_buffer.get(), '!', 42);
-  scoped_ptr<BinaryValue> scoped_binary(
+  std::unique_ptr<BinaryValue> scoped_binary(
       new BinaryValue(std::move(original_buffer), 42));
   BinaryValue* original_binary = scoped_binary.get();
   original_dict.Set("binary", std::move(scoped_binary));
 
-  scoped_ptr<ListValue> scoped_list(new ListValue());
+  std::unique_ptr<ListValue> scoped_list(new ListValue());
   Value* original_list = scoped_list.get();
-  scoped_ptr<FundamentalValue> scoped_list_element_0(new FundamentalValue(0));
+  std::unique_ptr<FundamentalValue> scoped_list_element_0(
+      new FundamentalValue(0));
   Value* original_list_element_0 = scoped_list_element_0.get();
   scoped_list->Append(std::move(scoped_list_element_0));
-  scoped_ptr<FundamentalValue> scoped_list_element_1(new FundamentalValue(1));
+  std::unique_ptr<FundamentalValue> scoped_list_element_1(
+      new FundamentalValue(1));
   Value* original_list_element_1 = scoped_list_element_1.get();
   scoped_list->Append(std::move(scoped_list_element_1));
   original_dict.Set("list", std::move(scoped_list));
 
-  scoped_ptr<DictionaryValue> scoped_nested_dictionary(new DictionaryValue());
+  std::unique_ptr<DictionaryValue> scoped_nested_dictionary(
+      new DictionaryValue());
   Value* original_nested_dictionary = scoped_nested_dictionary.get();
   scoped_nested_dictionary->SetString("key", "value");
   original_dict.Set("dictionary", std::move(scoped_nested_dictionary));
 
-  scoped_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
+  std::unique_ptr<DictionaryValue> copy_dict = original_dict.CreateDeepCopy();
   ASSERT_TRUE(copy_dict.get());
   ASSERT_NE(copy_dict.get(), &original_dict);
 
@@ -546,8 +552,8 @@
 }
 
 TEST(ValuesTest, Equals) {
-  scoped_ptr<Value> null1(Value::CreateNullValue());
-  scoped_ptr<Value> null2(Value::CreateNullValue());
+  std::unique_ptr<Value> null1(Value::CreateNullValue());
+  std::unique_ptr<Value> null2(Value::CreateNullValue());
   EXPECT_NE(null1.get(), null2.get());
   EXPECT_TRUE(null1->Equals(null2.get()));
 
@@ -562,21 +568,21 @@
   dv.SetString("d2", ASCIIToUTF16("http://google.com"));
   dv.Set("e", Value::CreateNullValue());
 
-  scoped_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
+  std::unique_ptr<DictionaryValue> copy = dv.CreateDeepCopy();
   EXPECT_TRUE(dv.Equals(copy.get()));
 
-  scoped_ptr<ListValue> list(new ListValue);
+  std::unique_ptr<ListValue> list(new ListValue);
   ListValue* original_list = list.get();
   list->Append(Value::CreateNullValue());
-  list->Append(make_scoped_ptr(new DictionaryValue));
-  scoped_ptr<Value> list_copy(list->CreateDeepCopy());
+  list->Append(WrapUnique(new DictionaryValue));
+  std::unique_ptr<Value> list_copy(list->CreateDeepCopy());
 
   dv.Set("f", std::move(list));
   EXPECT_FALSE(dv.Equals(copy.get()));
   copy->Set("f", std::move(list_copy));
   EXPECT_TRUE(dv.Equals(copy.get()));
 
-  original_list->Append(make_scoped_ptr(new FundamentalValue(true)));
+  original_list->Append(WrapUnique(new FundamentalValue(true)));
   EXPECT_FALSE(dv.Equals(copy.get()));
 
   // Check if Equals detects differences in only the keys.
@@ -588,14 +594,14 @@
 }
 
 TEST(ValuesTest, StaticEquals) {
-  scoped_ptr<Value> null1(Value::CreateNullValue());
-  scoped_ptr<Value> null2(Value::CreateNullValue());
+  std::unique_ptr<Value> null1(Value::CreateNullValue());
+  std::unique_ptr<Value> null2(Value::CreateNullValue());
   EXPECT_TRUE(Value::Equals(null1.get(), null2.get()));
   EXPECT_TRUE(Value::Equals(NULL, NULL));
 
-  scoped_ptr<Value> i42(new FundamentalValue(42));
-  scoped_ptr<Value> j42(new FundamentalValue(42));
-  scoped_ptr<Value> i17(new FundamentalValue(17));
+  std::unique_ptr<Value> i42(new FundamentalValue(42));
+  std::unique_ptr<Value> j42(new FundamentalValue(42));
+  std::unique_ptr<Value> i17(new FundamentalValue(17));
   EXPECT_TRUE(Value::Equals(i42.get(), i42.get()));
   EXPECT_TRUE(Value::Equals(j42.get(), i42.get()));
   EXPECT_TRUE(Value::Equals(i42.get(), j42.get()));
@@ -612,50 +618,52 @@
 
 TEST(ValuesTest, DeepCopyCovariantReturnTypes) {
   DictionaryValue original_dict;
-  scoped_ptr<Value> scoped_null(Value::CreateNullValue());
+  std::unique_ptr<Value> scoped_null(Value::CreateNullValue());
   Value* original_null = scoped_null.get();
   original_dict.Set("null", std::move(scoped_null));
-  scoped_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
+  std::unique_ptr<FundamentalValue> scoped_bool(new FundamentalValue(true));
   Value* original_bool = scoped_bool.get();
   original_dict.Set("bool", std::move(scoped_bool));
-  scoped_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
+  std::unique_ptr<FundamentalValue> scoped_int(new FundamentalValue(42));
   Value* original_int = scoped_int.get();
   original_dict.Set("int", std::move(scoped_int));
-  scoped_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
+  std::unique_ptr<FundamentalValue> scoped_double(new FundamentalValue(3.14));
   Value* original_double = scoped_double.get();
   original_dict.Set("double", std::move(scoped_double));
-  scoped_ptr<StringValue> scoped_string(new StringValue("hello"));
+  std::unique_ptr<StringValue> scoped_string(new StringValue("hello"));
   Value* original_string = scoped_string.get();
   original_dict.Set("string", std::move(scoped_string));
-  scoped_ptr<StringValue> scoped_string16(
+  std::unique_ptr<StringValue> scoped_string16(
       new StringValue(ASCIIToUTF16("hello16")));
   Value* original_string16 = scoped_string16.get();
   original_dict.Set("string16", std::move(scoped_string16));
 
-  scoped_ptr<char[]> original_buffer(new char[42]);
+  std::unique_ptr<char[]> original_buffer(new char[42]);
   memset(original_buffer.get(), '!', 42);
-  scoped_ptr<BinaryValue> scoped_binary(
+  std::unique_ptr<BinaryValue> scoped_binary(
       new BinaryValue(std::move(original_buffer), 42));
   Value* original_binary = scoped_binary.get();
   original_dict.Set("binary", std::move(scoped_binary));
 
-  scoped_ptr<ListValue> scoped_list(new ListValue());
+  std::unique_ptr<ListValue> scoped_list(new ListValue());
   Value* original_list = scoped_list.get();
-  scoped_ptr<FundamentalValue> scoped_list_element_0(new FundamentalValue(0));
+  std::unique_ptr<FundamentalValue> scoped_list_element_0(
+      new FundamentalValue(0));
   scoped_list->Append(std::move(scoped_list_element_0));
-  scoped_ptr<FundamentalValue> scoped_list_element_1(new FundamentalValue(1));
+  std::unique_ptr<FundamentalValue> scoped_list_element_1(
+      new FundamentalValue(1));
   scoped_list->Append(std::move(scoped_list_element_1));
   original_dict.Set("list", std::move(scoped_list));
 
-  scoped_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
-  scoped_ptr<Value> copy_null = original_null->CreateDeepCopy();
-  scoped_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
-  scoped_ptr<Value> copy_int = original_int->CreateDeepCopy();
-  scoped_ptr<Value> copy_double = original_double->CreateDeepCopy();
-  scoped_ptr<Value> copy_string = original_string->CreateDeepCopy();
-  scoped_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
-  scoped_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
-  scoped_ptr<Value> copy_list = original_list->CreateDeepCopy();
+  std::unique_ptr<Value> copy_dict = original_dict.CreateDeepCopy();
+  std::unique_ptr<Value> copy_null = original_null->CreateDeepCopy();
+  std::unique_ptr<Value> copy_bool = original_bool->CreateDeepCopy();
+  std::unique_ptr<Value> copy_int = original_int->CreateDeepCopy();
+  std::unique_ptr<Value> copy_double = original_double->CreateDeepCopy();
+  std::unique_ptr<Value> copy_string = original_string->CreateDeepCopy();
+  std::unique_ptr<Value> copy_string16 = original_string16->CreateDeepCopy();
+  std::unique_ptr<Value> copy_binary = original_binary->CreateDeepCopy();
+  std::unique_ptr<Value> copy_list = original_list->CreateDeepCopy();
 
   EXPECT_TRUE(original_dict.Equals(copy_dict.get()));
   EXPECT_TRUE(original_null->Equals(copy_null.get()));
@@ -669,18 +677,18 @@
 }
 
 TEST(ValuesTest, RemoveEmptyChildren) {
-  scoped_ptr<DictionaryValue> root(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> root(new DictionaryValue);
   // Remove empty lists and dictionaries.
-  root->Set("empty_dict", make_scoped_ptr(new DictionaryValue));
-  root->Set("empty_list", make_scoped_ptr(new ListValue));
+  root->Set("empty_dict", WrapUnique(new DictionaryValue));
+  root->Set("empty_list", WrapUnique(new ListValue));
   root->SetWithoutPathExpansion("a.b.c.d.e",
-                                make_scoped_ptr(new DictionaryValue));
+                                WrapUnique(new DictionaryValue));
   root = root->DeepCopyWithoutEmptyChildren();
   EXPECT_TRUE(root->empty());
 
   // Make sure we don't prune too much.
   root->SetBoolean("bool", true);
-  root->Set("empty_dict", make_scoped_ptr(new DictionaryValue));
+  root->Set("empty_dict", WrapUnique(new DictionaryValue));
   root->SetString("empty_string", std::string());
   root = root->DeepCopyWithoutEmptyChildren();
   EXPECT_EQ(2U, root->size());
@@ -692,22 +700,22 @@
   // Nested test cases.  These should all reduce back to the bool and string
   // set above.
   {
-    root->Set("a.b.c.d.e", make_scoped_ptr(new DictionaryValue));
+    root->Set("a.b.c.d.e", WrapUnique(new DictionaryValue));
     root = root->DeepCopyWithoutEmptyChildren();
     EXPECT_EQ(2U, root->size());
   }
   {
-    scoped_ptr<DictionaryValue> inner(new DictionaryValue);
-    inner->Set("empty_dict", make_scoped_ptr(new DictionaryValue));
-    inner->Set("empty_list", make_scoped_ptr(new ListValue));
+    std::unique_ptr<DictionaryValue> inner(new DictionaryValue);
+    inner->Set("empty_dict", WrapUnique(new DictionaryValue));
+    inner->Set("empty_list", WrapUnique(new ListValue));
     root->Set("dict_with_empty_children", std::move(inner));
     root = root->DeepCopyWithoutEmptyChildren();
     EXPECT_EQ(2U, root->size());
   }
   {
-    scoped_ptr<ListValue> inner(new ListValue);
-    inner->Append(make_scoped_ptr(new DictionaryValue));
-    inner->Append(make_scoped_ptr(new ListValue));
+    std::unique_ptr<ListValue> inner(new ListValue);
+    inner->Append(WrapUnique(new DictionaryValue));
+    inner->Append(WrapUnique(new ListValue));
     root->Set("list_with_empty_children", std::move(inner));
     root = root->DeepCopyWithoutEmptyChildren();
     EXPECT_EQ(2U, root->size());
@@ -715,13 +723,13 @@
 
   // Nested with siblings.
   {
-    scoped_ptr<ListValue> inner(new ListValue());
-    inner->Append(make_scoped_ptr(new DictionaryValue));
-    inner->Append(make_scoped_ptr(new ListValue));
+    std::unique_ptr<ListValue> inner(new ListValue());
+    inner->Append(WrapUnique(new DictionaryValue));
+    inner->Append(WrapUnique(new ListValue));
     root->Set("list_with_empty_children", std::move(inner));
-    scoped_ptr<DictionaryValue> inner2(new DictionaryValue);
-    inner2->Set("empty_dict", make_scoped_ptr(new DictionaryValue));
-    inner2->Set("empty_list", make_scoped_ptr(new ListValue));
+    std::unique_ptr<DictionaryValue> inner2(new DictionaryValue);
+    inner2->Set("empty_dict", WrapUnique(new DictionaryValue));
+    inner2->Set("empty_list", WrapUnique(new ListValue));
     root->Set("dict_with_empty_children", std::move(inner2));
     root = root->DeepCopyWithoutEmptyChildren();
     EXPECT_EQ(2U, root->size());
@@ -729,10 +737,10 @@
 
   // Make sure nested values don't get pruned.
   {
-    scoped_ptr<ListValue> inner(new ListValue);
-    scoped_ptr<ListValue> inner2(new ListValue);
-    inner2->Append(make_scoped_ptr(new StringValue("hello")));
-    inner->Append(make_scoped_ptr(new DictionaryValue));
+    std::unique_ptr<ListValue> inner(new ListValue);
+    std::unique_ptr<ListValue> inner2(new ListValue);
+    inner2->Append(WrapUnique(new StringValue("hello")));
+    inner->Append(WrapUnique(new DictionaryValue));
     inner->Append(std::move(inner2));
     root->Set("list_with_empty_children", std::move(inner));
     root = root->DeepCopyWithoutEmptyChildren();
@@ -747,18 +755,18 @@
 }
 
 TEST(ValuesTest, MergeDictionary) {
-  scoped_ptr<DictionaryValue> base(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> base(new DictionaryValue);
   base->SetString("base_key", "base_key_value_base");
   base->SetString("collide_key", "collide_key_value_base");
-  scoped_ptr<DictionaryValue> base_sub_dict(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> base_sub_dict(new DictionaryValue);
   base_sub_dict->SetString("sub_base_key", "sub_base_key_value_base");
   base_sub_dict->SetString("sub_collide_key", "sub_collide_key_value_base");
   base->Set("sub_dict_key", std::move(base_sub_dict));
 
-  scoped_ptr<DictionaryValue> merge(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> merge(new DictionaryValue);
   merge->SetString("merge_key", "merge_key_value_merge");
   merge->SetString("collide_key", "collide_key_value_merge");
-  scoped_ptr<DictionaryValue> merge_sub_dict(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> merge_sub_dict(new DictionaryValue);
   merge_sub_dict->SetString("sub_merge_key", "sub_merge_key_value_merge");
   merge_sub_dict->SetString("sub_collide_key", "sub_collide_key_value_merge");
   merge->Set("sub_dict_key", std::move(merge_sub_dict));
@@ -792,7 +800,7 @@
 }
 
 TEST(ValuesTest, MergeDictionaryDeepCopy) {
-  scoped_ptr<DictionaryValue> child(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> child(new DictionaryValue);
   DictionaryValue* original_child = child.get();
   child->SetString("test", "value");
   EXPECT_EQ(1U, child->size());
@@ -801,7 +809,7 @@
   EXPECT_TRUE(child->GetString("test", &value));
   EXPECT_EQ("value", value);
 
-  scoped_ptr<DictionaryValue> base(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> base(new DictionaryValue);
   base->Set("dict", std::move(child));
   EXPECT_EQ(1U, base->size());
 
@@ -809,7 +817,7 @@
   EXPECT_TRUE(base->GetDictionary("dict", &ptr));
   EXPECT_EQ(original_child, ptr);
 
-  scoped_ptr<DictionaryValue> merged(new DictionaryValue);
+  std::unique_ptr<DictionaryValue> merged(new DictionaryValue);
   merged->MergeDictionary(base.get());
   EXPECT_EQ(1U, merged->size());
   EXPECT_TRUE(merged->GetDictionary("dict", &ptr));
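
Note on the migration pattern in the hunks above: scoped_ptr<T> maps directly to
std::unique_ptr<T>, and make_scoped_ptr() maps to base::WrapUnique() from
base/memory/ptr_util.h. A minimal sketch of the before/after, using the
base::Value API shown in this diff:

    #include <memory>
    #include "base/memory/ptr_util.h"
    #include "base/values.h"

    void AppendEmptyDict(base::ListValue* list) {
      // Old: list->Append(make_scoped_ptr(new base::DictionaryValue));
      list->Append(base::WrapUnique(new base::DictionaryValue));
    }

The std::unique_ptr<DictionaryValue> returned by WrapUnique() converts to the
std::unique_ptr<Value> parameter of Append() because DictionaryValue derives
from Value, so ownership transfer reads the same as before.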
diff --git a/base/win/scoped_handle_test_dll.cc b/base/win/scoped_handle_test_dll.cc
index e6e1215..440a4ca 100644
--- a/base/win/scoped_handle_test_dll.cc
+++ b/base/win/scoped_handle_test_dll.cc
@@ -6,11 +6,9 @@
 
 #include <vector>
 
+#include "base/win/current_module.h"
 #include "base/win/scoped_handle.h"
 
-// http://blogs.msdn.com/oldnewthing/archive/2004/10/25/247180.aspx
-extern "C" IMAGE_DOS_HEADER __ImageBase;
-
 namespace base {
 namespace win {
 namespace testing {
@@ -95,7 +93,7 @@
     return false;
 
   // Get my module
-  HMODULE my_module = reinterpret_cast<HMODULE>(&__ImageBase);
+  HMODULE my_module = CURRENT_MODULE();
   if (!my_module)
     return false;
 
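
The new base/win/current_module.h pulls the __ImageBase trick removed above
into a shared header. A rough sketch of what that header is expected to provide
(simplified for illustration, not the verbatim header):

    #include <windows.h>

    // Linker-provided symbol placed at the base of the current PE image.
    extern "C" IMAGE_DOS_HEADER __ImageBase;

    // Expands to the HMODULE of whichever module this macro is expanded in.
    #define CURRENT_MODULE() reinterpret_cast<HMODULE>(&__ImageBase)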
diff --git a/build/build_config.h b/build/build_config.h
index e152a66..80a93d3 100644
--- a/build/build_config.h
+++ b/build/build_config.h
@@ -159,7 +159,7 @@
 #define ARCH_CPU_LITTLE_ENDIAN 1
 #elif defined(__MIPSEL__)
 #if defined(__LP64__)
-#define ARCH_CPU_MIPS64_FAMILY 1
+#define ARCH_CPU_MIPS_FAMILY 1
 #define ARCH_CPU_MIPS64EL 1
 #define ARCH_CPU_64_BITS 1
 #define ARCH_CPU_LITTLE_ENDIAN 1
diff --git a/build/buildflag.h b/build/buildflag.h
new file mode 100644
index 0000000..5776a75
--- /dev/null
+++ b/build/buildflag.h
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BUILD_BUILDFLAG_H_
+#define BUILD_BUILDFLAG_H_
+
+// These macros un-mangle the names of the build flags in a way that looks
+// natural, and gives errors if the flag is not defined. Normally in the
+// preprocessor it's easy to make mistakes that interpret "you haven't done
+// the setup to know what the flag is" as "flag is off". Normally you would
+// include the generated header rather than include this file directly.
+//
+// This is for use with generated headers. See build/buildflag_header.gni.
+
+// This dance of two macros does a concatenation of two preprocessor args using
+// ## doubly indirectly because using ## directly prevents macros in that
+// parameter from being expanded.
+#define BUILDFLAG_CAT_INDIRECT(a, b) a ## b
+#define BUILDFLAG_CAT(a, b) BUILDFLAG_CAT_INDIRECT(a, b)
+
+// Accessor for build flags.
+//
+// To test for a value, if the build file specifies:
+//
+//   ENABLE_FOO=true
+//
+// Then you would check at build-time in source code with:
+//
+//   #include "foo_flags.h"  // The header the build file specified.
+//
+//   #if BUILDFLAG(ENABLE_FOO)
+//     ...
+//   #endif
+//
+// There will be no #define called ENABLE_FOO, so if you accidentally test for
+// whether that is defined, it will always be negative. You can also use
+// the value in expressions:
+//
+//   const char kSpamServerName[] = BUILDFLAG(SPAM_SERVER_NAME);
+//
+// Because the flag is accessed as a preprocessor macro with (), an error
+// will be thrown if the proper header defining the internal flag value has
+// not been included.
+#define BUILDFLAG(flag) (BUILDFLAG_CAT(BUILDFLAG_INTERNAL_, flag)())
+
+#endif  // BUILD_BUILDFLAG_H_
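
For context, the macro above pairs with headers generated by
build/buildflag_header.gni. A hedged sketch of how the two sides fit together;
the flag name and header name are hypothetical and used only for illustration:

    // foo_flags.h -- what a generated flag header is expected to contain
    // for ENABLE_FOO=true:
    #define BUILDFLAG_INTERNAL_ENABLE_FOO() (1)

    // consumer.cc
    #include "build/buildflag.h"
    #include "foo_flags.h"

    #if BUILDFLAG(ENABLE_FOO)  // -> (BUILDFLAG_CAT(BUILDFLAG_INTERNAL_, ENABLE_FOO)()) -> (1)
    // Feature code compiled only when the flag is set.
    #endif

If foo_flags.h is not included, BUILDFLAG_INTERNAL_ENABLE_FOO() is an unknown
identifier inside an #if expression rather than silently evaluating to 0, which
is the failure mode the comment block above is describing.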
diff --git a/components/timers/alarm_timer_chromeos.cc b/components/timers/alarm_timer_chromeos.cc
index ae14870..3f1abbf 100644
--- a/components/timers/alarm_timer_chromeos.cc
+++ b/components/timers/alarm_timer_chromeos.cc
@@ -16,8 +16,8 @@
 #include "base/macros.h"
 #include "base/message_loop/message_loop.h"
 #include "base/pending_task.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/trace_event.h"
 
 namespace timers {
@@ -145,7 +145,7 @@
   base::Closure on_timer_fired_callback_for_test_;
 
   // Manages watching file descriptors.
-  scoped_ptr<base::MessageLoopForIO::FileDescriptorWatcher> fd_watcher_;
+  std::unique_ptr<base::MessageLoopForIO::FileDescriptorWatcher> fd_watcher_;
 
   // The sequence numbers of the last Reset() call handled respectively on
   // |origin_task_runner_| and on the MessageLoopForIO used for watching the
@@ -229,8 +229,12 @@
 
   // Now clear the timer.
   DCHECK_NE(alarm_fd_, -1);
+#if defined(ANDROID)
   itimerspec blank_time;
   memset(&blank_time, 0, sizeof(blank_time));
+#else
+  itimerspec blank_time = {};
+#endif  // defined(ANDROID)
   if (timerfd_settime(alarm_fd_, 0, &blank_time, NULL) < 0)
     PLOG(ERROR) << "Unable to clear alarm time.  Timer may still fire.";
 }
@@ -254,7 +258,7 @@
   }
 }
 
-void AlarmTimer::Delegate::OnFileCanWriteWithoutBlocking(int /* fd */) {
+void AlarmTimer::Delegate::OnFileCanWriteWithoutBlocking(int /*fd*/) {
   NOTREACHED();
 }
 
@@ -286,8 +290,12 @@
 
   // Actually set the timer.  This will also clear the pre-existing timer, if
   // any.
+#if defined(ANDROID)
   itimerspec alarm_time;
   memset(&alarm_time, 0, sizeof(alarm_time));
+#else
+  itimerspec alarm_time = {};
+#endif  // defined(ANDROID)
   alarm_time.it_value.tv_sec = delay.InSeconds();
   alarm_time.it_value.tv_nsec =
       (delay.InMicroseconds() % base::Time::kMicrosecondsPerSecond) *
@@ -428,7 +436,8 @@
 
   // Take ownership of the pending user task, which is going to be cleared by
   // the Stop() or Reset() functions below.
-  scoped_ptr<base::PendingTask> pending_user_task(std::move(pending_task_));
+  std::unique_ptr<base::PendingTask> pending_user_task(
+      std::move(pending_task_));
 
   // Re-schedule or stop the timer as requested.
   if (base::Timer::is_repeating())
diff --git a/components/timers/alarm_timer_chromeos.h b/components/timers/alarm_timer_chromeos.h
index 2f6b0ff..313c9f9 100644
--- a/components/timers/alarm_timer_chromeos.h
+++ b/components/timers/alarm_timer_chromeos.h
@@ -5,6 +5,8 @@
 #ifndef COMPONENTS_TIMERS_ALARM_TIMER_CHROMEOS_H_
 #define COMPONENTS_TIMERS_ALARM_TIMER_CHROMEOS_H_
 
+#include <memory>
+
 #include "base/callback.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
@@ -67,7 +69,7 @@
 
   // Keeps track of the user task we want to run.  A new one is constructed
   // every time Reset() is called.
-  scoped_ptr<base::PendingTask> pending_task_;
+  std::unique_ptr<base::PendingTask> pending_task_;
 
   // Tracks whether the timer has the ability to wake the system up from
   // suspend.  This is a runtime check because we won't know if the system
@@ -82,7 +84,7 @@
   // Observes |origin_message_loop_| and informs this class if it will be
   // destroyed.
   class MessageLoopObserver;
-  scoped_ptr<MessageLoopObserver> message_loop_observer_;
+  std::unique_ptr<MessageLoopObserver> message_loop_observer_;
 
   base::WeakPtrFactory<AlarmTimer> weak_factory_;
 
diff --git a/crypto/BUILD.gn b/crypto/BUILD.gn
index 4e339cc..088a5c1 100644
--- a/crypto/BUILD.gn
+++ b/crypto/BUILD.gn
@@ -8,8 +8,8 @@
 component("crypto") {
   output_name = "crcrypto"  # Avoid colliding with OpenSSL's libcrypto.
   sources = [
-    "aead_openssl.cc",
-    "aead_openssl.h",
+    "aead.cc",
+    "aead.h",
     "apple_keychain.h",
     "apple_keychain_ios.mm",
     "apple_keychain_mac.mm",
@@ -19,28 +19,20 @@
     "crypto_export.h",
     "cssm_init.cc",
     "cssm_init.h",
-    "curve25519-donna.c",
+    "curve25519.cc",
     "curve25519.h",
-    "curve25519_nss.cc",
-    "curve25519_openssl.cc",
+    "ec_private_key.cc",
     "ec_private_key.h",
-    "ec_private_key_nss.cc",
-    "ec_private_key_openssl.cc",
     "ec_signature_creator.cc",
     "ec_signature_creator.h",
+    "ec_signature_creator_impl.cc",
     "ec_signature_creator_impl.h",
-    "ec_signature_creator_nss.cc",
-    "ec_signature_creator_openssl.cc",
     "encryptor.cc",
     "encryptor.h",
-    "encryptor_nss.cc",
-    "encryptor_openssl.cc",
     "hkdf.cc",
     "hkdf.h",
     "hmac.cc",
     "hmac.h",
-    "hmac_nss.cc",
-    "hmac_openssl.cc",
     "mac_security_services_lock.cc",
     "mac_security_services_lock.h",
 
@@ -67,34 +59,20 @@
     "random.h",
     "rsa_private_key.cc",
     "rsa_private_key.h",
-    "rsa_private_key_nss.cc",
-    "rsa_private_key_openssl.cc",
     "scoped_capi_types.h",
     "scoped_nss_types.h",
+    "secure_hash.cc",
     "secure_hash.h",
-    "secure_hash_default.cc",
-    "secure_hash_openssl.cc",
     "secure_util.cc",
     "secure_util.h",
     "sha2.cc",
     "sha2.h",
+    "signature_creator.cc",
     "signature_creator.h",
-    "signature_creator_nss.cc",
-    "signature_creator_openssl.cc",
+    "signature_verifier.cc",
     "signature_verifier.h",
-    "signature_verifier_nss.cc",
-    "signature_verifier_openssl.cc",
+    "symmetric_key.cc",
     "symmetric_key.h",
-    "symmetric_key_nss.cc",
-    "symmetric_key_openssl.cc",
-    "third_party/nss/chromium-blapi.h",
-    "third_party/nss/chromium-blapit.h",
-    "third_party/nss/chromium-nss.h",
-    "third_party/nss/chromium-sha256.h",
-    "third_party/nss/pk11akey.cc",
-    "third_party/nss/rsawrapr.c",
-    "third_party/nss/secsign.cc",
-    "third_party/nss/sha512.cc",
   ]
 
   # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
@@ -112,6 +90,11 @@
       "mock_apple_keychain.cc",
       "mock_apple_keychain.h",
     ]
+  } else {
+    libs = [
+      "CoreFoundation.framework",
+      "Security.framework",
+    ]
   }
 
   if (!is_mac) {
@@ -129,57 +112,8 @@
     ]
   }
 
-  if (is_android) {
-    deps += [ "//third_party/android_tools:cpu_features" ]
-  }
-
-  if (use_openssl) {
-    # Remove NSS files when using OpenSSL
-    sources -= [
-      "curve25519-donna.c",
-      "curve25519_nss.cc",
-      "ec_private_key_nss.cc",
-      "ec_signature_creator_nss.cc",
-      "encryptor_nss.cc",
-      "hmac_nss.cc",
-      "rsa_private_key_nss.cc",
-      "secure_hash_default.cc",
-      "signature_creator_nss.cc",
-      "signature_verifier_nss.cc",
-      "symmetric_key_nss.cc",
-      "third_party/nss/chromium-blapi.h",
-      "third_party/nss/chromium-blapit.h",
-      "third_party/nss/chromium-nss.h",
-      "third_party/nss/pk11akey.cc",
-      "third_party/nss/rsawrapr.c",
-      "third_party/nss/secsign.cc",
-    ]
-  } else {
-    # Remove OpenSSL when using NSS.
-    sources -= [
-      "aead_openssl.cc",
-      "aead_openssl.h",
-      "auto_cbb.h",
-      "curve25519_openssl.cc",
-      "ec_private_key_openssl.cc",
-      "ec_signature_creator_openssl.cc",
-      "encryptor_openssl.cc",
-      "hmac_openssl.cc",
-      "openssl_bio_string.cc",
-      "openssl_bio_string.h",
-      "openssl_util.cc",
-      "openssl_util.h",
-      "rsa_private_key_openssl.cc",
-      "secure_hash_openssl.cc",
-      "signature_creator_openssl.cc",
-      "signature_verifier_openssl.cc",
-      "symmetric_key_openssl.cc",
-    ]
-  }
-
-  # Some files are built when NSS is used at all, either for the internal crypto
-  # library or the platform certificate library.
-  if (use_openssl && !use_nss_certs) {
+  # Some files are built when NSS is used for the platform certificate library.
+  if (!use_nss_certs) {
     sources -= [
       "nss_key_util.cc",
       "nss_key_util.h",
@@ -196,41 +130,9 @@
   }
 }
 
-# TODO(GYP): TODO(dpranke), fix the compile errors for this stuff
-# and make it work.
-if (false && is_win) {
-  # A minimal crypto subset for hmac-related stuff that small standalone
-  # targets can use to reduce code size on Windows. This does not depend on
-  # OpenSSL/NSS but will use Windows APIs for that functionality.
-  source_set("crypto_minimal_win") {
-    sources = [
-      "crypto_export.h",
-      "hmac.cc",
-      "hmac.h",
-      "hmac_win.cc",
-      "scoped_capi_types.h",
-      "scoped_nss_types.h",
-      "secure_util.cc",
-      "secure_util.h",
-      "symmetric_key.h",
-      "symmetric_key_win.cc",
-      "third_party/nss/chromium-blapi.h",
-      "third_party/nss/chromium-sha256.h",
-      "third_party/nss/sha512.cc",
-    ]
-
-    deps = [
-      "//base",
-      "//base/third_party/dynamic_annotations",
-    ]
-
-    defines = [ "CRYPTO_IMPLEMENTATION" ]
-  }
-}
-
 test("crypto_unittests") {
   sources = [
-    "aead_openssl_unittest.cc",
+    "aead_unittest.cc",
     "curve25519_unittest.cc",
     "ec_private_key_unittest.cc",
     "ec_signature_creator_unittest.cc",
@@ -251,19 +153,14 @@
     "symmetric_key_unittest.cc",
   ]
 
-  # Some files are built when NSS is used at all, either for the internal crypto
-  # library or the platform certificate library.
-  if (use_openssl && !use_nss_certs) {
+  # Some files are built when NSS is used for the platform certificate library.
+  if (!use_nss_certs) {
     sources -= [
       "nss_key_util_unittest.cc",
       "nss_util_unittest.cc",
     ]
   }
 
-  if (!use_openssl) {
-    sources -= [ "openssl_bio_string_unittest.cc" ]
-  }
-
   configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
 
   deps = [
@@ -311,7 +208,7 @@
 }
 
 config("platform_config") {
-  if ((!use_openssl || use_nss_certs) && is_clang) {
+  if (use_nss_certs && is_clang) {
     # There is a broken header guard in /usr/include/nss/secmod.h:
     # https://bugzilla.mozilla.org/show_bug.cgi?id=884072
     cflags = [ "-Wno-header-guard" ]
@@ -322,40 +219,14 @@
 # according to the state of the crypto flags. A target just wanting to depend
 # on the current SSL library should just depend on this.
 group("platform") {
-  if (use_openssl) {
-    public_deps = [
-      "//third_party/boringssl",
-    ]
-  } else {
-    public_deps = [
-      "//net/third_party/nss/ssl:libssl",
-    ]
-  }
+  public_deps = [
+    "//third_party/boringssl",
+  ]
 
-  # Link in NSS if it is used for either the internal crypto library
-  # (!use_openssl) or platform certificate library (use_nss_certs).
-  if (!use_openssl || use_nss_certs) {
-    if (is_linux) {
-      # On Linux, we use the system NSS (excepting SSL where we always use our
-      # own).
-      public_configs = [ ":platform_config" ]
-      if (!use_openssl) {
-        # If using a bundled copy of NSS's SSL library, ensure the bundled SSL
-        # header search path comes before the system one so our versions are
-        # used. The libssl target will add the search path we want, but
-        # according to GN's ordering rules, public_configs' search path will get
-        # applied before ones inherited from our dependencies.  Therefore, we
-        # need to explicitly list our custom libssl's config here before the
-        # system one.
-        public_configs += [ "//net/third_party/nss/ssl:ssl_config" ]
-      }
-      public_configs += [ "//third_party/nss:system_nss_no_ssl_config" ]
-    } else {
-      # Non-Linux platforms use the hermetic NSS from the tree.
-      public_deps += [
-        "//third_party/nss:nspr",
-        "//third_party/nss:nss",
-      ]
-    }
+  # Link in NSS if it is used for the platform certificate library
+  # (use_nss_certs).
+  if (use_nss_certs) {
+    public_configs = [ ":platform_config" ]
+    public_configs += [ "//third_party/nss:system_nss_no_ssl_config" ]
   }
 }
diff --git a/crypto/crypto.gyp b/crypto/crypto.gyp
index e2472d7..8ed2ab2 100644
--- a/crypto/crypto.gyp
+++ b/crypto/crypto.gyp
@@ -17,6 +17,7 @@
       'dependencies': [
         '../base/base.gyp:base',
         '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+        '../third_party/boringssl/boringssl.gyp:boringssl',
       ],
       'defines': [
         'CRYPTO_IMPLEMENTATION',
@@ -24,10 +25,10 @@
       'conditions': [
         [ 'os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
           'dependencies': [
-            '../build/linux/system.gyp:ssl',
+            '../build/linux/system.gyp:nss',
           ],
           'export_dependent_settings': [
-            '../build/linux/system.gyp:ssl',
+            '../build/linux/system.gyp:nss',
           ],
           'conditions': [
             [ 'chromeos==1', {
@@ -35,11 +36,6 @@
               },
             ],
           ],
-        }, {  # os_posix != 1 or OS == "mac" or OS == "ios" or OS == "android"
-            'sources!': [
-              'hmac_win.cc',
-              'symmetric_key_win.cc',
-            ],
         }],
         [ 'OS != "mac" and OS != "ios"', {
           'sources!': [
@@ -48,11 +44,6 @@
             'mock_apple_keychain.h',
           ],
         }],
-        [ 'OS == "android"', {
-          'dependencies': [
-            '../build/android/ndk.gyp:cpu_features',
-          ],
-        }],
         [ 'os_bsd==1', {
           'link_settings': {
             'libraries': [
@@ -75,16 +66,6 @@
             'mac_security_services_lock.h',
           ],
         }],
-        [ 'use_openssl == 0 and (OS == "mac" or OS == "ios" or OS == "win")', {
-          'dependencies': [
-            '../third_party/nss/nss.gyp:nspr',
-            '../third_party/nss/nss.gyp:nss',
-          ],
-          'export_dependent_settings': [
-            '../third_party/nss/nss.gyp:nspr',
-            '../third_party/nss/nss.gyp:nss',
-          ],
-        }],
         [ 'OS != "win"', {
           'sources!': [
             'capi_util.h',
@@ -96,58 +77,8 @@
             4267,  # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
           ],
         }],
-        [ 'use_openssl==1', {
-            'dependencies': [
-              '../third_party/boringssl/boringssl.gyp:boringssl',
-            ],
-            # TODO(joth): Use a glob to match exclude patterns once the
-            #             OpenSSL file set is complete.
-            'sources!': [
-              'curve25519-donna.c',
-              'curve25519_nss.cc',
-              'ec_private_key_nss.cc',
-              'ec_signature_creator_nss.cc',
-              'encryptor_nss.cc',
-              'hmac_nss.cc',
-              'rsa_private_key_nss.cc',
-              'secure_hash_default.cc',
-              'signature_creator_nss.cc',
-              'signature_verifier_nss.cc',
-              'symmetric_key_nss.cc',
-              'third_party/nss/chromium-blapi.h',
-              'third_party/nss/chromium-blapit.h',
-              'third_party/nss/chromium-nss.h',
-              'third_party/nss/chromium-prtypes.h',
-              'third_party/nss/chromium-sha256.h',
-              'third_party/nss/pk11akey.cc',
-              'third_party/nss/rsawrapr.c',
-              'third_party/nss/secsign.cc',
-              'third_party/nss/sha512.cc',
-            ],
-          }, {
-            'sources!': [
-              'aead_openssl.cc',
-              'aead_openssl.h',
-              'auto_cbb.h',
-              'curve25519_openssl.cc',
-              'ec_private_key_openssl.cc',
-              'ec_signature_creator_openssl.cc',
-              'encryptor_openssl.cc',
-              'hmac_openssl.cc',
-              'openssl_bio_string.cc',
-              'openssl_bio_string.h',
-              'openssl_util.cc',
-              'openssl_util.h',
-              'rsa_private_key_openssl.cc',
-              'secure_hash_openssl.cc',
-              'signature_creator_openssl.cc',
-              'signature_verifier_openssl.cc',
-              'symmetric_key_openssl.cc',
-            ],
-        },],
-        [ 'use_openssl==1 and use_nss_certs==0', {
-            # Some files are built when NSS is used at all, either for the
-            # internal crypto library or the platform certificate library.
+        [ 'use_nss_certs==0', {
+            # Some files are built when NSS is used for the platform certificate library.
             'sources!': [
               'nss_key_util.cc',
               'nss_key_util.h',
@@ -165,7 +96,7 @@
       'target_name': 'crypto_unittests',
       'type': 'executable',
       'sources': [
-        'aead_openssl_unittest.cc',
+        'aead_unittest.cc',
         'curve25519_unittest.cc',
         'ec_private_key_unittest.cc',
         'ec_signature_creator_unittest.cc',
@@ -193,39 +124,25 @@
         '../base/base.gyp:test_support_base',
         '../testing/gmock.gyp:gmock',
         '../testing/gtest.gyp:gtest',
+        '../third_party/boringssl/boringssl.gyp:boringssl',
       ],
       'conditions': [
         [ 'use_nss_certs == 1', {
           'dependencies': [
-            '../build/linux/system.gyp:ssl',
+            '../build/linux/system.gyp:nss',
           ],
         }],
-        [ 'use_openssl == 1 and use_nss_certs == 0', {
-          # Some files are built when NSS is used at all, either for the
-          # internal crypto library or the platform certificate library.
+        [ 'use_nss_certs == 0', {
+          # Some files are built when NSS is used for the platform certificate library.
           'sources!': [
             'nss_key_util_unittest.cc',
             'nss_util_unittest.cc',
           ],
         }],
-        [ 'use_openssl == 0 and (OS == "mac" or OS == "ios" or OS == "win")', {
-          'dependencies': [
-            '../third_party/nss/nss.gyp:nspr',
-          ],
-        }],
         [ 'OS == "win"', {
           # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
           'msvs_disabled_warnings': [4267, ],
         }],
-        [ 'use_openssl==1', {
-          'dependencies': [
-            '../third_party/boringssl/boringssl.gyp:boringssl',
-          ],
-        }, {
-          'sources!': [
-            'openssl_bio_string_unittest.cc',
-          ],
-        }],
       ],
     },
   ],
@@ -234,13 +151,12 @@
       'targets': [
         {
           'target_name': 'crypto_nacl_win64',
-          # We do not want nacl_helper to depend on NSS because this would
-          # require including a 64-bit copy of NSS. Thus, use the native APIs
-          # for the helper.
+          # We use the native APIs for the helper.
           'type': '<(component)',
           'dependencies': [
             '../base/base.gyp:base_win64',
             '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
+            '../third_party/boringssl/boringssl.gyp:boringssl_nacl_win64',
           ],
           'sources': [
             '<@(nacl_win64_sources)',
diff --git a/crypto/crypto.gypi b/crypto/crypto.gypi
index 143d555..dadc0ea 100644
--- a/crypto/crypto.gypi
+++ b/crypto/crypto.gypi
@@ -8,56 +8,43 @@
     # This is required so that we can build them for nacl win64.
     'variables': {
       'hmac_win64_related_sources': [
+        'crypto_export.h',
         'hmac.cc',
         'hmac.h',
-        'hmac_win.cc',
+        'openssl_util.cc',
+        'openssl_util.h',
         'secure_util.cc',
         'secure_util.h',
+        'symmetric_key.cc',
         'symmetric_key.h',
-        'symmetric_key_win.cc',
-        'third_party/nss/chromium-blapi.h',
-        'third_party/nss/chromium-blapit.h',
-        'third_party/nss/chromium-prtypes.h',
-        'third_party/nss/chromium-sha256.h',
-        'third_party/nss/sha512.cc',
-        'wincrypt_shim.h',
       ],
     },
     'crypto_sources': [
       # NOTE: all transitive dependencies of HMAC on windows need
       #     to be placed in the source list above.
       '<@(hmac_win64_related_sources)',
-      'aead_openssl.cc',
-      'aead_openssl.h',
+      'aead.cc',
+      'aead.h',
       'apple_keychain.h',
       'apple_keychain_ios.mm',
       'apple_keychain_mac.mm',
       'auto_cbb.h',
       'capi_util.cc',
       'capi_util.h',
-      'crypto_export.h',
       'cssm_init.cc',
       'cssm_init.h',
-      'curve25519-donna.c',
+      'curve25519.cc',
       'curve25519.h',
-      'curve25519_nss.cc',
-      'curve25519_openssl.cc',
+      'ec_private_key.cc',
       'ec_private_key.h',
-      'ec_private_key_nss.cc',
-      'ec_private_key_openssl.cc',
       'ec_signature_creator.cc',
       'ec_signature_creator.h',
+      'ec_signature_creator_impl.cc',
       'ec_signature_creator_impl.h',
-      'ec_signature_creator_nss.cc',
-      'ec_signature_creator_openssl.cc',
       'encryptor.cc',
       'encryptor.h',
-      'encryptor_nss.cc',
-      'encryptor_openssl.cc',
       'hkdf.cc',
       'hkdf.h',
-      'hmac_nss.cc',
-      'hmac_openssl.cc',
       'mac_security_services_lock.cc',
       'mac_security_services_lock.h',
       'mock_apple_keychain.cc',
@@ -74,35 +61,23 @@
       'nss_util_internal.h',
       'openssl_bio_string.cc',
       'openssl_bio_string.h',
-      'openssl_util.cc',
-      'openssl_util.h',
       'p224.cc',
       'p224.h',
       'random.h',
       'random.cc',
       'rsa_private_key.cc',
       'rsa_private_key.h',
-      'rsa_private_key_nss.cc',
-      'rsa_private_key_openssl.cc',
       'scoped_capi_types.h',
       'scoped_nss_types.h',
+      'secure_hash.cc',
       'secure_hash.h',
-      'secure_hash_default.cc',
-      'secure_hash_openssl.cc',
       'sha2.cc',
       'sha2.h',
+      'signature_creator.cc',
       'signature_creator.h',
-      'signature_creator_nss.cc',
-      'signature_creator_openssl.cc',
+      'signature_verifier.cc',
       'signature_verifier.h',
-      'signature_verifier_nss.cc',
-      'signature_verifier_openssl.cc',
-      'symmetric_key_nss.cc',
-      'symmetric_key_openssl.cc',
-      'third_party/nss/chromium-nss.h',
-      'third_party/nss/pk11akey.cc',
-      'third_party/nss/rsawrapr.c',
-      'third_party/nss/secsign.cc',
+      'wincrypt_shim.h',
     ],
     'nacl_win64_sources': [
       '<@(hmac_win64_related_sources)',
diff --git a/crypto/crypto_nacl.gyp b/crypto/crypto_nacl.gyp
index 255c42c..c7c01a8 100644
--- a/crypto/crypto_nacl.gyp
+++ b/crypto/crypto_nacl.gyp
@@ -38,8 +38,6 @@
         ['exclude', '^cssm_'],
         ['exclude', '^nss_'],
         ['exclude', '^mac_'],
-        ['exclude', '^third_party/nss/'],
-        ['include', '^third_party/nss/sha512.cc'],
       ],
     },
   ],
diff --git a/crypto/curve25519-donna.c b/crypto/curve25519-donna.c
deleted file mode 100644
index f141ac0..0000000
--- a/crypto/curve25519-donna.c
+++ /dev/null
@@ -1,592 +0,0 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-/*
- * curve25519-donna: Curve25519 elliptic curve, public key function
- *
- * http://code.google.com/p/curve25519-donna/
- *
- * Adam Langley <agl@imperialviolet.org>
- *
- * Derived from public domain C code by Daniel J. Bernstein <djb@cr.yp.to>
- *
- * More information about curve25519 can be found here
- *   http://cr.yp.to/ecdh.html
- *
- * djb's sample implementation of curve25519 is written in a special assembly
- * language called qhasm and uses the floating point registers.
- *
- * This is, almost, a clean room reimplementation from the curve25519 paper. It
- * uses many of the tricks described therein. Only the crecip function is taken
- * from the sample implementation.
- */
-
-#include <string.h>
-#include <stdint.h>
-
-typedef uint8_t u8;
-typedef int32_t s32;
-typedef int64_t limb;
-
-/* Field element representation:
- *
- * Field elements are written as an array of signed, 64-bit limbs, least
- * significant first. The value of the field element is:
- *   x[0] + 2^26·x[1] + x^51·x[2] + 2^102·x[3] + ...
- *
- * i.e. the limbs are 26, 25, 26, 25, ... bits wide.
- */
-
-/* Sum two numbers: output += in */
-static void fsum(limb *output, const limb *in) {
-  unsigned i;
-  for (i = 0; i < 10; i += 2) {
-    output[0+i] = (output[0+i] + in[0+i]);
-    output[1+i] = (output[1+i] + in[1+i]);
-  }
-}
-
-/* Find the difference of two numbers: output = in - output
- * (note the order of the arguments!)
- */
-static void fdifference(limb *output, const limb *in) {
-  unsigned i;
-  for (i = 0; i < 10; ++i) {
-    output[i] = (in[i] - output[i]);
-  }
-}
-
-/* Multiply a number my a scalar: output = in * scalar */
-static void fscalar_product(limb *output, const limb *in, const limb scalar) {
-  unsigned i;
-  for (i = 0; i < 10; ++i) {
-    output[i] = in[i] * scalar;
-  }
-}
-
-/* Multiply two numbers: output = in2 * in
- *
- * output must be distinct to both inputs. The inputs are reduced coefficient
- * form, the output is not.
- */
-static void fproduct(limb *output, const limb *in2, const limb *in) {
-  output[0] =       ((limb) ((s32) in2[0])) * ((s32) in[0]);
-  output[1] =       ((limb) ((s32) in2[0])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[0]);
-  output[2] =  2 *  ((limb) ((s32) in2[1])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[0]);
-  output[3] =       ((limb) ((s32) in2[1])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[0]);
-  output[4] =       ((limb) ((s32) in2[2])) * ((s32) in[2]) +
-               2 * (((limb) ((s32) in2[1])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[1])) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[0]);
-  output[5] =       ((limb) ((s32) in2[2])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[0]);
-  output[6] =  2 * (((limb) ((s32) in2[3])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[1])) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[0]);
-  output[7] =       ((limb) ((s32) in2[3])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[0]);
-  output[8] =       ((limb) ((s32) in2[4])) * ((s32) in[4]) +
-               2 * (((limb) ((s32) in2[3])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[1])) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[0]);
-  output[9] =       ((limb) ((s32) in2[4])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[2]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[1]) +
-                    ((limb) ((s32) in2[0])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[0]);
-  output[10] = 2 * (((limb) ((s32) in2[5])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[1])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[1])) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[2]);
-  output[11] =      ((limb) ((s32) in2[5])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[4]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[3]) +
-                    ((limb) ((s32) in2[2])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[2]);
-  output[12] =      ((limb) ((s32) in2[6])) * ((s32) in[6]) +
-               2 * (((limb) ((s32) in2[5])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[3])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[3])) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[4]);
-  output[13] =      ((limb) ((s32) in2[6])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[7])) * ((s32) in[6]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[5]) +
-                    ((limb) ((s32) in2[4])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[4]);
-  output[14] = 2 * (((limb) ((s32) in2[7])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[5])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[5])) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[6]);
-  output[15] =      ((limb) ((s32) in2[7])) * ((s32) in[8]) +
-                    ((limb) ((s32) in2[8])) * ((s32) in[7]) +
-                    ((limb) ((s32) in2[6])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[6]);
-  output[16] =      ((limb) ((s32) in2[8])) * ((s32) in[8]) +
-               2 * (((limb) ((s32) in2[7])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[7]));
-  output[17] =      ((limb) ((s32) in2[8])) * ((s32) in[9]) +
-                    ((limb) ((s32) in2[9])) * ((s32) in[8]);
-  output[18] = 2 *  ((limb) ((s32) in2[9])) * ((s32) in[9]);
-}
-
-/* Reduce a long form to a short form by taking the input mod 2^255 - 19. */
-static void freduce_degree(limb *output) {
-  /* Each of these shifts and adds ends up multiplying the value by 19. */
-  output[8] += output[18] << 4;
-  output[8] += output[18] << 1;
-  output[8] += output[18];
-  output[7] += output[17] << 4;
-  output[7] += output[17] << 1;
-  output[7] += output[17];
-  output[6] += output[16] << 4;
-  output[6] += output[16] << 1;
-  output[6] += output[16];
-  output[5] += output[15] << 4;
-  output[5] += output[15] << 1;
-  output[5] += output[15];
-  output[4] += output[14] << 4;
-  output[4] += output[14] << 1;
-  output[4] += output[14];
-  output[3] += output[13] << 4;
-  output[3] += output[13] << 1;
-  output[3] += output[13];
-  output[2] += output[12] << 4;
-  output[2] += output[12] << 1;
-  output[2] += output[12];
-  output[1] += output[11] << 4;
-  output[1] += output[11] << 1;
-  output[1] += output[11];
-  output[0] += output[10] << 4;
-  output[0] += output[10] << 1;
-  output[0] += output[10];
-}
-
-/* Reduce all coefficients of the short form input so that |x| < 2^26.
- *
- * On entry: |output[i]| < 2^62
- */
-static void freduce_coefficients(limb *output) {
-  unsigned i;
-  do {
-    output[10] = 0;
-
-    for (i = 0; i < 10; i += 2) {
-      limb over = output[i] / 0x4000000l;
-      output[i+1] += over;
-      output[i] -= over * 0x4000000l;
-
-      over = output[i+1] / 0x2000000;
-      output[i+2] += over;
-      output[i+1] -= over * 0x2000000;
-    }
-    output[0] += 19 * output[10];
-  } while (output[10]);
-}
-
-/* A helpful wrapper around fproduct: output = in * in2.
- *
- * output must be distinct from both inputs. The output is reduced degree and
- * reduced coefficient.
- */
-static void
-fmul(limb *output, const limb *in, const limb *in2) {
-  limb t[19];
-  fproduct(t, in, in2);
-  freduce_degree(t);
-  freduce_coefficients(t);
-  memcpy(output, t, sizeof(limb) * 10);
-}
-
-static void fsquare_inner(limb *output, const limb *in) {
-  output[0] =       ((limb) ((s32) in[0])) * ((s32) in[0]);
-  output[1] =  2 *  ((limb) ((s32) in[0])) * ((s32) in[1]);
-  output[2] =  2 * (((limb) ((s32) in[1])) * ((s32) in[1]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[2]));
-  output[3] =  2 * (((limb) ((s32) in[1])) * ((s32) in[2]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[3]));
-  output[4] =       ((limb) ((s32) in[2])) * ((s32) in[2]) +
-               4 *  ((limb) ((s32) in[1])) * ((s32) in[3]) +
-               2 *  ((limb) ((s32) in[0])) * ((s32) in[4]);
-  output[5] =  2 * (((limb) ((s32) in[2])) * ((s32) in[3]) +
-                    ((limb) ((s32) in[1])) * ((s32) in[4]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[5]));
-  output[6] =  2 * (((limb) ((s32) in[3])) * ((s32) in[3]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[4]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[6]) +
-               2 *  ((limb) ((s32) in[1])) * ((s32) in[5]));
-  output[7] =  2 * (((limb) ((s32) in[3])) * ((s32) in[4]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[5]) +
-                    ((limb) ((s32) in[1])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[7]));
-  output[8] =       ((limb) ((s32) in[4])) * ((s32) in[4]) +
-               2 * (((limb) ((s32) in[2])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[8]) +
-               2 * (((limb) ((s32) in[1])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[3])) * ((s32) in[5])));
-  output[9] =  2 * (((limb) ((s32) in[4])) * ((s32) in[5]) +
-                    ((limb) ((s32) in[3])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[1])) * ((s32) in[8]) +
-                    ((limb) ((s32) in[0])) * ((s32) in[9]));
-  output[10] = 2 * (((limb) ((s32) in[5])) * ((s32) in[5]) +
-                    ((limb) ((s32) in[4])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[8]) +
-               2 * (((limb) ((s32) in[3])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[1])) * ((s32) in[9])));
-  output[11] = 2 * (((limb) ((s32) in[5])) * ((s32) in[6]) +
-                    ((limb) ((s32) in[4])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[3])) * ((s32) in[8]) +
-                    ((limb) ((s32) in[2])) * ((s32) in[9]));
-  output[12] =      ((limb) ((s32) in[6])) * ((s32) in[6]) +
-               2 * (((limb) ((s32) in[4])) * ((s32) in[8]) +
-               2 * (((limb) ((s32) in[5])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[3])) * ((s32) in[9])));
-  output[13] = 2 * (((limb) ((s32) in[6])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[5])) * ((s32) in[8]) +
-                    ((limb) ((s32) in[4])) * ((s32) in[9]));
-  output[14] = 2 * (((limb) ((s32) in[7])) * ((s32) in[7]) +
-                    ((limb) ((s32) in[6])) * ((s32) in[8]) +
-               2 *  ((limb) ((s32) in[5])) * ((s32) in[9]));
-  output[15] = 2 * (((limb) ((s32) in[7])) * ((s32) in[8]) +
-                    ((limb) ((s32) in[6])) * ((s32) in[9]));
-  output[16] =      ((limb) ((s32) in[8])) * ((s32) in[8]) +
-               4 *  ((limb) ((s32) in[7])) * ((s32) in[9]);
-  output[17] = 2 *  ((limb) ((s32) in[8])) * ((s32) in[9]);
-  output[18] = 2 *  ((limb) ((s32) in[9])) * ((s32) in[9]);
-}
-
-static void
-fsquare(limb *output, const limb *in) {
-  limb t[19];
-  fsquare_inner(t, in);
-  freduce_degree(t);
-  freduce_coefficients(t);
-  memcpy(output, t, sizeof(limb) * 10);
-}
-
-/* Take a little-endian, 32-byte number and expand it into polynomial form */
-static void
-fexpand(limb *output, const u8 *input) {
-#define F(n,start,shift,mask) \
-  output[n] = ((((limb) input[start + 0]) | \
-                ((limb) input[start + 1]) << 8 | \
-                ((limb) input[start + 2]) << 16 | \
-                ((limb) input[start + 3]) << 24) >> shift) & mask;
-  F(0, 0, 0, 0x3ffffff);
-  F(1, 3, 2, 0x1ffffff);
-  F(2, 6, 3, 0x3ffffff);
-  F(3, 9, 5, 0x1ffffff);
-  F(4, 12, 6, 0x3ffffff);
-  F(5, 16, 0, 0x1ffffff);
-  F(6, 19, 1, 0x3ffffff);
-  F(7, 22, 3, 0x1ffffff);
-  F(8, 25, 4, 0x3ffffff);
-  F(9, 28, 6, 0x1ffffff);
-#undef F
-}
-
-/* Take a fully reduced polynomial form number and contract it into a
- * little-endian, 32-byte array
- */
-static void
-fcontract(u8 *output, limb *input) {
-  int i;
-
-  do {
-    for (i = 0; i < 9; ++i) {
-      if ((i & 1) == 1) {
-        while (input[i] < 0) {
-          input[i] += 0x2000000;
-          input[i + 1]--;
-        }
-      } else {
-        while (input[i] < 0) {
-          input[i] += 0x4000000;
-          input[i + 1]--;
-        }
-      }
-    }
-    while (input[9] < 0) {
-      input[9] += 0x2000000;
-      input[0] -= 19;
-    }
-  } while (input[0] < 0);
-
-  input[1] <<= 2;
-  input[2] <<= 3;
-  input[3] <<= 5;
-  input[4] <<= 6;
-  input[6] <<= 1;
-  input[7] <<= 3;
-  input[8] <<= 4;
-  input[9] <<= 6;
-#define F(i, s) \
-  output[s+0] |=  input[i] & 0xff; \
-  output[s+1]  = (input[i] >> 8) & 0xff; \
-  output[s+2]  = (input[i] >> 16) & 0xff; \
-  output[s+3]  = (input[i] >> 24) & 0xff;
-  output[0] = 0;
-  output[16] = 0;
-  F(0,0);
-  F(1,3);
-  F(2,6);
-  F(3,9);
-  F(4,12);
-  F(5,16);
-  F(6,19);
-  F(7,22);
-  F(8,25);
-  F(9,28);
-#undef F
-}
-
-/* Input: Q, Q', Q-Q'
- * Output: 2Q, Q+Q'
- *
- *   x2 z2: long form
- *   x3 z3: long form
- *   x z: short form, destroyed
- *   xprime zprime: short form, destroyed
- *   qmqp: short form, preserved
- */
-static void fmonty(limb *x2, limb *z2,  /* output 2Q */
-                   limb *x3, limb *z3,  /* output Q + Q' */
-                   limb *x, limb *z,    /* input Q */
-                   limb *xprime, limb *zprime,  /* input Q' */
-                   const limb *qmqp /* input Q - Q' */) {
-  limb origx[10], origxprime[10], zzz[19], xx[19], zz[19], xxprime[19],
-        zzprime[19], zzzprime[19], xxxprime[19];
-
-  memcpy(origx, x, 10 * sizeof(limb));
-  fsum(x, z);
-  fdifference(z, origx);  // does x - z
-
-  memcpy(origxprime, xprime, sizeof(limb) * 10);
-  fsum(xprime, zprime);
-  fdifference(zprime, origxprime);
-  fproduct(xxprime, xprime, z);
-  fproduct(zzprime, x, zprime);
-  freduce_degree(xxprime);
-  freduce_coefficients(xxprime);
-  freduce_degree(zzprime);
-  freduce_coefficients(zzprime);
-  memcpy(origxprime, xxprime, sizeof(limb) * 10);
-  fsum(xxprime, zzprime);
-  fdifference(zzprime, origxprime);
-  fsquare(xxxprime, xxprime);
-  fsquare(zzzprime, zzprime);
-  fproduct(zzprime, zzzprime, qmqp);
-  freduce_degree(zzprime);
-  freduce_coefficients(zzprime);
-  memcpy(x3, xxxprime, sizeof(limb) * 10);
-  memcpy(z3, zzprime, sizeof(limb) * 10);
-
-  fsquare(xx, x);
-  fsquare(zz, z);
-  fproduct(x2, xx, zz);
-  freduce_degree(x2);
-  freduce_coefficients(x2);
-  fdifference(zz, xx);  // does zz = xx - zz
-  memset(zzz + 10, 0, sizeof(limb) * 9);
-  fscalar_product(zzz, zz, 121665);
-  freduce_degree(zzz);
-  freduce_coefficients(zzz);
-  fsum(zzz, xx);
-  fproduct(z2, zz, zzz);
-  freduce_degree(z2);
-  freduce_coefficients(z2);
-}
-
-/* Calculates nQ where Q is the x-coordinate of a point on the curve
- *
- *   resultx/resultz: the x coordinate of the resulting curve point (short form)
- *   n: a little endian, 32-byte number
- *   q: a point on the curve (short form)
- */
-static void
-cmult(limb *resultx, limb *resultz, const u8 *n, const limb *q) {
-  limb a[19] = {0}, b[19] = {1}, c[19] = {1}, d[19] = {0};
-  limb *nqpqx = a, *nqpqz = b, *nqx = c, *nqz = d, *t;
-  limb e[19] = {0}, f[19] = {1}, g[19] = {0}, h[19] = {1};
-  limb *nqpqx2 = e, *nqpqz2 = f, *nqx2 = g, *nqz2 = h;
-
-  unsigned i, j;
-
-  memcpy(nqpqx, q, sizeof(limb) * 10);
-
-  for (i = 0; i < 32; ++i) {
-    u8 byte = n[31 - i];
-    for (j = 0; j < 8; ++j) {
-      if (byte & 0x80) {
-        fmonty(nqpqx2, nqpqz2,
-               nqx2, nqz2,
-               nqpqx, nqpqz,
-               nqx, nqz,
-               q);
-      } else {
-        fmonty(nqx2, nqz2,
-               nqpqx2, nqpqz2,
-               nqx, nqz,
-               nqpqx, nqpqz,
-               q);
-      }
-
-      t = nqx;
-      nqx = nqx2;
-      nqx2 = t;
-      t = nqz;
-      nqz = nqz2;
-      nqz2 = t;
-      t = nqpqx;
-      nqpqx = nqpqx2;
-      nqpqx2 = t;
-      t = nqpqz;
-      nqpqz = nqpqz2;
-      nqpqz2 = t;
-
-      byte <<= 1;
-    }
-  }
-
-  memcpy(resultx, nqx, sizeof(limb) * 10);
-  memcpy(resultz, nqz, sizeof(limb) * 10);
-}
-
-// -----------------------------------------------------------------------------
-// Shamelessly copied from djb's code
-// -----------------------------------------------------------------------------
-static void
-crecip(limb *out, const limb *z) {
-  limb z2[10];
-  limb z9[10];
-  limb z11[10];
-  limb z2_5_0[10];
-  limb z2_10_0[10];
-  limb z2_20_0[10];
-  limb z2_50_0[10];
-  limb z2_100_0[10];
-  limb t0[10];
-  limb t1[10];
-  int i;
-
-  /* 2 */ fsquare(z2,z);
-  /* 4 */ fsquare(t1,z2);
-  /* 8 */ fsquare(t0,t1);
-  /* 9 */ fmul(z9,t0,z);
-  /* 11 */ fmul(z11,z9,z2);
-  /* 22 */ fsquare(t0,z11);
-  /* 2^5 - 2^0 = 31 */ fmul(z2_5_0,t0,z9);
-
-  /* 2^6 - 2^1 */ fsquare(t0,z2_5_0);
-  /* 2^7 - 2^2 */ fsquare(t1,t0);
-  /* 2^8 - 2^3 */ fsquare(t0,t1);
-  /* 2^9 - 2^4 */ fsquare(t1,t0);
-  /* 2^10 - 2^5 */ fsquare(t0,t1);
-  /* 2^10 - 2^0 */ fmul(z2_10_0,t0,z2_5_0);
-
-  /* 2^11 - 2^1 */ fsquare(t0,z2_10_0);
-  /* 2^12 - 2^2 */ fsquare(t1,t0);
-  /* 2^20 - 2^10 */
-  for (i = 2;i < 10;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
-  /* 2^20 - 2^0 */ fmul(z2_20_0,t1,z2_10_0);
-
-  /* 2^21 - 2^1 */ fsquare(t0,z2_20_0);
-  /* 2^22 - 2^2 */ fsquare(t1,t0);
-  /* 2^40 - 2^20 */
-  for (i = 2;i < 20;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
-  /* 2^40 - 2^0 */ fmul(t0,t1,z2_20_0);
-
-  /* 2^41 - 2^1 */ fsquare(t1,t0);
-  /* 2^42 - 2^2 */ fsquare(t0,t1);
-  /* 2^50 - 2^10 */
-  for (i = 2;i < 10;i += 2) { fsquare(t1,t0); fsquare(t0,t1); }
-  /* 2^50 - 2^0 */ fmul(z2_50_0,t0,z2_10_0);
-
-  /* 2^51 - 2^1 */ fsquare(t0,z2_50_0);
-  /* 2^52 - 2^2 */ fsquare(t1,t0);
-  /* 2^100 - 2^50 */
-  for (i = 2;i < 50;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
-  /* 2^100 - 2^0 */ fmul(z2_100_0,t1,z2_50_0);
-
-  /* 2^101 - 2^1 */ fsquare(t1,z2_100_0);
-  /* 2^102 - 2^2 */ fsquare(t0,t1);
-  /* 2^200 - 2^100 */
-  for (i = 2;i < 100;i += 2) { fsquare(t1,t0); fsquare(t0,t1); }
-  /* 2^200 - 2^0 */ fmul(t1,t0,z2_100_0);
-
-  /* 2^201 - 2^1 */ fsquare(t0,t1);
-  /* 2^202 - 2^2 */ fsquare(t1,t0);
-  /* 2^250 - 2^50 */
-  for (i = 2;i < 50;i += 2) { fsquare(t0,t1); fsquare(t1,t0); }
-  /* 2^250 - 2^0 */ fmul(t0,t1,z2_50_0);
-
-  /* 2^251 - 2^1 */ fsquare(t1,t0);
-  /* 2^252 - 2^2 */ fsquare(t0,t1);
-  /* 2^253 - 2^3 */ fsquare(t1,t0);
-  /* 2^254 - 2^4 */ fsquare(t0,t1);
-  /* 2^255 - 2^5 */ fsquare(t1,t0);
-  /* 2^255 - 21 */ fmul(out,t1,z11);
-}
-
-int
-curve25519_donna(u8 *mypublic, const u8 *secret, const u8 *basepoint) {
-  limb bp[10], x[10], z[10], zmone[10];
-  uint8_t e[32];
-  int i;
-
-  for (i = 0; i < 32; ++i) e[i] = secret[i];
-  e[0] &= 248;
-  e[31] &= 127;
-  e[31] |= 64;
-
-  fexpand(bp, basepoint);
-  cmult(x, z, e, bp);
-  crecip(zmone, z);
-  fmul(z, x, zmone);
-  fcontract(mypublic, z);
-  return 0;
-}
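
The donna implementation is dropped entirely in this uprev. For reviewers who have not seen the removed API before, the sketch below shows how the primitive was typically driven; the extern declaration and the DerivePublicKey() wrapper are illustrative assumptions, not code from this tree.

#include <stdint.h>

// Illustrative sketch only; declaration assumed from the deleted file above.
extern "C" int curve25519_donna(uint8_t* mypublic,
                                const uint8_t* secret,
                                const uint8_t* basepoint);

// Derives the 32-byte public key that corresponds to a 32-byte secret.
void DerivePublicKey(const uint8_t secret[32], uint8_t public_key[32]) {
  // The standard curve25519 base point is 9 followed by 31 zero bytes.
  const uint8_t basepoint[32] = {9};
  // Clamping of the secret (e[0] &= 248, e[31] &= 127, e[31] |= 64) happens
  // inside curve25519_donna(), so the raw secret is passed as-is.
  curve25519_donna(public_key, secret, basepoint);
}
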
diff --git a/crypto/hmac.cc b/crypto/hmac.cc
index e9869b4..af5580b 100644
--- a/crypto/hmac.cc
+++ b/crypto/hmac.cc
@@ -47,7 +47,7 @@
   if (digest.empty())
     return false;
   size_t digest_length = DigestLength();
-  scoped_ptr<unsigned char[]> computed_digest(
+  std::unique_ptr<unsigned char[]> computed_digest(
       new unsigned char[digest_length]);
   if (!Sign(data, computed_digest.get(), digest_length))
     return false;
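
A brief note on the conversion above: the array form of std::unique_ptr releases its buffer with delete[], just as scoped_ptr<T[]> did, so the change is mechanical. A minimal, stand-alone illustration (not code from this tree):

#include <cstddef>
#include <memory>

void FillBuffer(size_t digest_length) {
  std::unique_ptr<unsigned char[]> computed_digest(
      new unsigned char[digest_length]);
  if (digest_length > 0)
    computed_digest[0] = 0;  // operator[] is provided by the array form.
}  // delete[] runs automatically when |computed_digest| goes out of scope.
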
diff --git a/crypto/hmac.h b/crypto/hmac.h
index ccdab30..ec32ed7 100644
--- a/crypto/hmac.h
+++ b/crypto/hmac.h
@@ -10,9 +10,10 @@
 
 #include <stddef.h>
 
+#include <memory>
+
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_piece.h"
 #include "crypto/crypto_export.h"
 
@@ -85,7 +86,7 @@
 
  private:
   HashAlgorithm hash_alg_;
-  scoped_ptr<HMACPlatformData> plat_;
+  std::unique_ptr<HMACPlatformData> plat_;
 
   DISALLOW_COPY_AND_ASSIGN(HMAC);
 };
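
For context, a minimal sketch of how callers drive this class, based on the Init()/Sign() signatures visible in the platform implementations removed below and the DigestLength() call used in hmac.cc above; the key and message are placeholders.

#include <string>
#include <vector>

#include "crypto/hmac.h"

// Illustrative sketch only.
bool HmacSha256(const std::string& key, const std::string& message,
                std::vector<unsigned char>* digest) {
  crypto::HMAC hmac(crypto::HMAC::SHA256);
  if (!hmac.Init(reinterpret_cast<const unsigned char*>(key.data()),
                 key.size())) {
    return false;
  }
  digest->resize(hmac.DigestLength());
  return hmac.Sign(message, digest->data(), digest->size());
}
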
diff --git a/crypto/hmac_openssl.cc b/crypto/hmac_openssl.cc
deleted file mode 100644
index 8c8c11a..0000000
--- a/crypto/hmac_openssl.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/hmac.h"
-
-#include <openssl/hmac.h>
-#include <stddef.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/stl_util.h"
-#include "crypto/openssl_util.h"
-
-namespace crypto {
-
-struct HMACPlatformData {
-  std::vector<unsigned char> key;
-};
-
-HMAC::HMAC(HashAlgorithm hash_alg) : hash_alg_(hash_alg) {
-  // Only SHA-1 and SHA-256 hash algorithms are supported now.
-  DCHECK(hash_alg_ == SHA1 || hash_alg_ == SHA256);
-}
-
-bool HMAC::Init(const unsigned char* key, size_t key_length) {
-  // Init must not be called more than once on the same HMAC object.
-  DCHECK(!plat_);
-  plat_.reset(new HMACPlatformData());
-  plat_->key.assign(key, key + key_length);
-  return true;
-}
-
-HMAC::~HMAC() {
-  if (plat_) {
-    // Zero out key copy.
-    plat_->key.assign(plat_->key.size(), 0);
-    STLClearObject(&plat_->key);
-  }
-}
-
-bool HMAC::Sign(const base::StringPiece& data,
-                unsigned char* digest,
-                size_t digest_length) const {
-  DCHECK(plat_);  // Init must be called before Sign.
-
-  ScopedOpenSSLSafeSizeBuffer<EVP_MAX_MD_SIZE> result(digest, digest_length);
-  return !!::HMAC(hash_alg_ == SHA1 ? EVP_sha1() : EVP_sha256(),
-                  plat_->key.data(), plat_->key.size(),
-                  reinterpret_cast<const unsigned char*>(data.data()),
-                  data.size(), result.safe_buffer(), NULL);
-}
-
-}  // namespace crypto
diff --git a/crypto/hmac_win.cc b/crypto/hmac_win.cc
deleted file mode 100644
index ab29081..0000000
--- a/crypto/hmac_win.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/hmac.h"
-
-#include <windows.h>
-#include <stddef.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "base/logging.h"
-#include "crypto/scoped_capi_types.h"
-#include "crypto/third_party/nss/chromium-blapi.h"
-#include "crypto/third_party/nss/chromium-sha256.h"
-#include "crypto/wincrypt_shim.h"
-
-namespace crypto {
-
-namespace {
-
-// Implementation of HMAC-SHA-256:
-//
-// SHA-256 is supported in Windows XP SP3 or later.  We still need to support
-// Windows XP SP2, so unfortunately we have to implement HMAC-SHA-256 here.
-
-enum {
-  SHA256_BLOCK_SIZE = 64  // Block size (in bytes) of the input to SHA-256.
-};
-
-// NSS doesn't accept size_t for text size, so divide the data into smaller
-// chunks as needed.
-void Wrapped_SHA256_Update(SHA256Context* ctx, const unsigned char* text,
-                           size_t text_len) {
-  const unsigned int kChunkSize = 1 << 30;
-  while (text_len > kChunkSize) {
-    SHA256_Update(ctx, text, kChunkSize);
-    text += kChunkSize;
-    text_len -= kChunkSize;
-  }
-  SHA256_Update(ctx, text, (unsigned int)text_len);
-}
-
-// See FIPS 198: The Keyed-Hash Message Authentication Code (HMAC).
-void ComputeHMACSHA256(const unsigned char* key, size_t key_len,
-                       const unsigned char* text, size_t text_len,
-                       unsigned char* output, size_t output_len) {
-  SHA256Context ctx;
-
-  // Pre-process the key, if necessary.
-  unsigned char key0[SHA256_BLOCK_SIZE];
-  if (key_len > SHA256_BLOCK_SIZE) {
-    SHA256_Begin(&ctx);
-    Wrapped_SHA256_Update(&ctx, key, key_len);
-    SHA256_End(&ctx, key0, NULL, SHA256_LENGTH);
-    memset(key0 + SHA256_LENGTH, 0, SHA256_BLOCK_SIZE - SHA256_LENGTH);
-  } else {
-    memcpy(key0, key, key_len);
-    if (key_len < SHA256_BLOCK_SIZE)
-      memset(key0 + key_len, 0, SHA256_BLOCK_SIZE - key_len);
-  }
-
-  unsigned char padded_key[SHA256_BLOCK_SIZE];
-  unsigned char inner_hash[SHA256_LENGTH];
-
-  // XOR key0 with ipad.
-  for (int i = 0; i < SHA256_BLOCK_SIZE; ++i)
-    padded_key[i] = key0[i] ^ 0x36;
-
-  // Compute the inner hash.
-  SHA256_Begin(&ctx);
-  SHA256_Update(&ctx, padded_key, SHA256_BLOCK_SIZE);
-  Wrapped_SHA256_Update(&ctx, text, text_len);
-  SHA256_End(&ctx, inner_hash, NULL, SHA256_LENGTH);
-
-  // XOR key0 with opad.
-  for (int i = 0; i < SHA256_BLOCK_SIZE; ++i)
-    padded_key[i] = key0[i] ^ 0x5c;
-
-  // Compute the outer hash.
-  SHA256_Begin(&ctx);
-  SHA256_Update(&ctx, padded_key, SHA256_BLOCK_SIZE);
-  SHA256_Update(&ctx, inner_hash, SHA256_LENGTH);
-  SHA256_End(&ctx, output, NULL, (unsigned int) output_len);
-}
-
-}  // namespace
-
-struct HMACPlatformData {
-  ~HMACPlatformData() {
-    if (!raw_key_.empty()) {
-      SecureZeroMemory(&raw_key_[0], raw_key_.size());
-    }
-
-    // Destroy the key before releasing the provider.
-    key_.reset();
-  }
-
-  ScopedHCRYPTPROV provider_;
-  ScopedHCRYPTKEY key_;
-
-  // For HMAC-SHA-256 only.
-  std::vector<unsigned char> raw_key_;
-};
-
-HMAC::HMAC(HashAlgorithm hash_alg)
-    : hash_alg_(hash_alg), plat_(new HMACPlatformData()) {
-  // Only SHA-1 and SHA-256 hash algorithms are supported now.
-  DCHECK(hash_alg_ == SHA1 || hash_alg_ == SHA256);
-}
-
-bool HMAC::Init(const unsigned char* key, size_t key_length) {
-  if (plat_->provider_ || plat_->key_ || !plat_->raw_key_.empty()) {
-    // Init must not be called more than once on the same HMAC object.
-    NOTREACHED();
-    return false;
-  }
-
-  if (hash_alg_ == SHA256) {
-    plat_->raw_key_.assign(key, key + key_length);
-    return true;
-  }
-
-  if (!CryptAcquireContext(plat_->provider_.receive(), NULL, NULL,
-                           PROV_RSA_FULL, CRYPT_VERIFYCONTEXT)) {
-    NOTREACHED();
-    return false;
-  }
-
-  // This code doesn't work on Win2k because PLAINTEXTKEYBLOB and
-  // CRYPT_IPSEC_HMAC_KEY are not supported on Windows 2000.  PLAINTEXTKEYBLOB
-  // allows the import of an unencrypted key.  For Win2k support, a cumbersome
-  // exponent-of-one key procedure must be used:
-  //     http://support.microsoft.com/kb/228786/en-us
-  // CRYPT_IPSEC_HMAC_KEY allows keys longer than 16 bytes.
-
-  struct KeyBlob {
-    BLOBHEADER header;
-    DWORD key_size;
-    BYTE key_data[1];
-  };
-  size_t key_blob_size = std::max(offsetof(KeyBlob, key_data) + key_length,
-                                  sizeof(KeyBlob));
-  std::vector<BYTE> key_blob_storage = std::vector<BYTE>(key_blob_size);
-  KeyBlob* key_blob = reinterpret_cast<KeyBlob*>(&key_blob_storage[0]);
-  key_blob->header.bType = PLAINTEXTKEYBLOB;
-  key_blob->header.bVersion = CUR_BLOB_VERSION;
-  key_blob->header.reserved = 0;
-  key_blob->header.aiKeyAlg = CALG_RC2;
-  key_blob->key_size = static_cast<DWORD>(key_length);
-  memcpy(key_blob->key_data, key, key_length);
-
-  if (!CryptImportKey(plat_->provider_, &key_blob_storage[0],
-                      (DWORD)key_blob_storage.size(), 0,
-                      CRYPT_IPSEC_HMAC_KEY, plat_->key_.receive())) {
-    NOTREACHED();
-    return false;
-  }
-
-  // Destroy the copy of the key.
-  SecureZeroMemory(key_blob->key_data, key_length);
-
-  return true;
-}
-
-HMAC::~HMAC() {
-}
-
-bool HMAC::Sign(const base::StringPiece& data,
-                unsigned char* digest,
-                size_t digest_length) const {
-  if (hash_alg_ == SHA256) {
-    if (plat_->raw_key_.empty())
-      return false;
-    ComputeHMACSHA256(&plat_->raw_key_[0], plat_->raw_key_.size(),
-                      reinterpret_cast<const unsigned char*>(data.data()),
-                      data.size(), digest, digest_length);
-    return true;
-  }
-
-  if (!plat_->provider_ || !plat_->key_)
-    return false;
-
-  if (hash_alg_ != SHA1) {
-    NOTREACHED();
-    return false;
-  }
-
-  ScopedHCRYPTHASH hash;
-  if (!CryptCreateHash(plat_->provider_, CALG_HMAC, plat_->key_, 0,
-                       hash.receive()))
-    return false;
-
-  HMAC_INFO hmac_info;
-  memset(&hmac_info, 0, sizeof(hmac_info));
-  hmac_info.HashAlgid = CALG_SHA1;
-  if (!CryptSetHashParam(hash, HP_HMAC_INFO,
-                         reinterpret_cast<BYTE*>(&hmac_info), 0))
-    return false;
-
-  if (!CryptHashData(hash, reinterpret_cast<const BYTE*>(data.data()),
-                     static_cast<DWORD>(data.size()), 0))
-    return false;
-
-  DWORD sha1_size = static_cast<DWORD>(digest_length);
-  return !!CryptGetHashParam(hash, HP_HASHVAL, digest, &sha1_size, 0);
-}
-
-}  // namespace crypto
diff --git a/crypto/nss_key_util.cc b/crypto/nss_key_util.cc
index 3e03489..1f72667 100644
--- a/crypto/nss_key_util.cc
+++ b/crypto/nss_key_util.cc
@@ -9,6 +9,8 @@
 #include <pk11pub.h>
 #include <stdint.h>
 
+#include <memory>
+
 #include "base/logging.h"
 #include "crypto/nss_util.h"
 
@@ -29,7 +31,7 @@
   }
 };
 
-typedef scoped_ptr<CERTSubjectPublicKeyInfo, PublicKeyInfoDeleter>
+typedef std::unique_ptr<CERTSubjectPublicKeyInfo, PublicKeyInfoDeleter>
     ScopedPublicKeyInfo;
 
 // Decodes |input| as a SubjectPublicKeyInfo and returns a SECItem containing
diff --git a/crypto/nss_util.cc b/crypto/nss_util.cc
index cbc57dc..96ee060 100644
--- a/crypto/nss_util.cc
+++ b/crypto/nss_util.cc
@@ -11,6 +11,8 @@
 #include <prinit.h>
 #include <prtime.h>
 #include <secmod.h>
+
+#include <memory>
 #include <utility>
 
 #include "crypto/nss_util_internal.h"
@@ -36,7 +38,6 @@
 #include "base/files/file_util.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/native_library.h"
 #include "base/stl_util.h"
@@ -78,7 +79,7 @@
 std::string GetNSSErrorMessage() {
   std::string result;
   if (PR_GetErrorTextLength()) {
-    scoped_ptr<char[]> error_text(new char[PR_GetErrorTextLength() + 1]);
+    std::unique_ptr<char[]> error_text(new char[PR_GetErrorTextLength() + 1]);
     PRInt32 copied = PR_GetErrorText(error_text.get());
     result = std::string(error_text.get(), copied);
   } else {
@@ -170,7 +171,7 @@
 #endif
 
   if (db_on_nfs) {
-    scoped_ptr<base::Environment> env(base::Environment::Create());
+    std::unique_ptr<base::Environment> env(base::Environment::Create());
     static const char kUseCacheEnvVar[] = "NSS_SDB_USE_CACHE";
     if (!env->HasVar(kUseCacheEnvVar))
       env->SetVar(kUseCacheEnvVar, "yes");
@@ -375,7 +376,8 @@
 
     // Note that a reference is not taken to chaps_module_. This is safe since
     // NSSInitSingleton is Leaky, so the reference it holds is never released.
-    scoped_ptr<TPMModuleAndSlot> tpm_args(new TPMModuleAndSlot(chaps_module_));
+    std::unique_ptr<TPMModuleAndSlot> tpm_args(
+        new TPMModuleAndSlot(chaps_module_));
     TPMModuleAndSlot* tpm_args_ptr = tpm_args.get();
     if (base::WorkerPool::PostTaskAndReply(
             FROM_HERE,
@@ -421,7 +423,7 @@
 
   void OnInitializedTPMTokenAndSystemSlot(
       const base::Callback<void(bool)>& callback,
-      scoped_ptr<TPMModuleAndSlot> tpm_args) {
+      std::unique_ptr<TPMModuleAndSlot> tpm_args) {
     DCHECK(thread_checker_.CalledOnValidThread());
     DVLOG(2) << "Loaded chaps: " << !!tpm_args->chaps_module
              << ", got tpm slot: " << !!tpm_args->tpm_slot;
@@ -537,7 +539,8 @@
 
     // Note that a reference is not taken to chaps_module_. This is safe since
     // NSSInitSingleton is Leaky, so the reference it holds is never released.
-    scoped_ptr<TPMModuleAndSlot> tpm_args(new TPMModuleAndSlot(chaps_module_));
+    std::unique_ptr<TPMModuleAndSlot> tpm_args(
+        new TPMModuleAndSlot(chaps_module_));
     TPMModuleAndSlot* tpm_args_ptr = tpm_args.get();
     base::WorkerPool::PostTaskAndReply(
         FROM_HERE,
@@ -552,8 +555,9 @@
         );
   }
 
-  void OnInitializedTPMForChromeOSUser(const std::string& username_hash,
-                                       scoped_ptr<TPMModuleAndSlot> tpm_args) {
+  void OnInitializedTPMForChromeOSUser(
+      const std::string& username_hash,
+      std::unique_ptr<TPMModuleAndSlot> tpm_args) {
     DCHECK(thread_checker_.CalledOnValidThread());
     DVLOG(2) << "Got tpm slot for " << username_hash << " "
              << !!tpm_args->tpm_slot;
@@ -806,7 +810,6 @@
     }
   }
 
-#if defined(USE_NSS_CERTS) || defined(OS_IOS)
   // Load nss's built-in root certs.
   SECMODModule* InitDefaultRootCerts() {
     SECMODModule* root = LoadModule("Root Certs", "libnssckbi.so", NULL);
@@ -846,7 +849,6 @@
     }
     return module;
   }
-#endif
 
   bool tpm_token_enabled_for_nss_;
   bool initializing_tpm_token_;
diff --git a/crypto/openssl_util.cc b/crypto/openssl_util.cc
index 2a31093..78c6cbb 100644
--- a/crypto/openssl_util.cc
+++ b/crypto/openssl_util.cc
@@ -4,74 +4,23 @@
 
 #include "crypto/openssl_util.h"
 
-#include <openssl/err.h>
 #if defined(OPENSSL_IS_BORINGSSL)
 #include <openssl/cpu.h>
 #else
 #include <openssl/ssl.h>
 #endif
 #include <openssl/crypto.h>
+#include <openssl/err.h>
 #include <stddef.h>
 #include <stdint.h>
 
 #include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/singleton.h"
 #include "base/strings/string_piece.h"
-#include "build/build_config.h"
-
-#if defined(OS_ANDROID) && defined(ARCH_CPU_ARMEL)
-#include <cpu-features.h>
-#include "base/cpu.h"
-#endif
 
 namespace crypto {
 
 namespace {
 
-// Singleton for initializing and cleaning up the OpenSSL library.
-class OpenSSLInitSingleton {
- public:
-  static OpenSSLInitSingleton* GetInstance() {
-    // We allow the SSL environment to leak for multiple reasons:
-    //   -  it is used from a non-joinable worker thread that is not stopped on
-    //      shutdown, hence may still be using OpenSSL library after the AtExit
-    //      runner has completed.
-    //   -  There are other OpenSSL related singletons (e.g. the client socket
-    //      context) whose cleanup depends on the global environment here, but
-    //      we can't control the order the AtExit handlers will run in so
-    //      allowing the global environment to leak at least ensures it is
-    //      available for those other singletons to reliably cleanup.
-    return base::Singleton<
-        OpenSSLInitSingleton,
-        base::LeakySingletonTraits<OpenSSLInitSingleton>>::get();
-  }
- private:
-  friend struct base::DefaultSingletonTraits<OpenSSLInitSingleton>;
-  OpenSSLInitSingleton() {
-#if defined(OS_ANDROID) && defined(ARCH_CPU_ARMEL)
-    const bool has_neon =
-        (android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
-    base::CPU cpu;
-    // CRYPTO_set_NEON_capable is called before |CRYPTO_library_init| because
-    // this stops BoringSSL from probing for NEON support via SIGILL in the case
-    // that getauxval isn't present. Also workaround a CPU with broken NEON
-    // support. See https://code.google.com/p/chromium/issues/detail?id=341598
-    CRYPTO_set_NEON_capable(has_neon && !cpu.has_broken_neon());
-#endif
-
-#if defined(OPENSSL_IS_BORINGSSL)
-    CRYPTO_library_init();
-#else
-    SSL_library_init();
-#endif
-  }
-
-  ~OpenSSLInitSingleton() {}
-
-  DISALLOW_COPY_AND_ASSIGN(OpenSSLInitSingleton);
-};
-
 // Callback routine for OpenSSL to print error messages. |str| is a
 // NULL-terminated string of length |len| containing diagnostic information
 // such as the library, function and reason for the error, the file and line
@@ -89,7 +38,12 @@
 }  // namespace
 
 void EnsureOpenSSLInit() {
-  (void)OpenSSLInitSingleton::GetInstance();
+#if defined(OPENSSL_IS_BORINGSSL)
+  // CRYPTO_library_init may be safely called concurrently.
+  CRYPTO_library_init();
+#else
+  SSL_library_init();
+#endif
 }
 
 void ClearOpenSSLERRStack(const tracked_objects::Location& location) {
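
The singleton is gone because, as the comment in the new code notes, BoringSSL's CRYPTO_library_init() may be safely called concurrently, so initialization can be a plain function call. A minimal sketch of the caller-side pattern, assuming the declarations in crypto/openssl_util.h:

#include "base/location.h"
#include "crypto/openssl_util.h"

// Illustrative sketch only.
void DoOpenSSLWork() {
  // Idempotent; call before using the library.
  crypto::EnsureOpenSSLInit();

  // ... OpenSSL/BoringSSL calls go here ...

  // Log and drop any error state the calls above may have queued.
  crypto::ClearOpenSSLERRStack(FROM_HERE);
}
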
diff --git a/crypto/rsa_private_key_openssl.cc b/crypto/rsa_private_key_openssl.cc
deleted file mode 100644
index 3e87a0a..0000000
--- a/crypto/rsa_private_key_openssl.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/rsa_private_key.h"
-
-#include <openssl/bytestring.h>
-#include <openssl/bn.h>
-#include <openssl/evp.h>
-#include <openssl/mem.h>
-#include <openssl/rsa.h>
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "crypto/auto_cbb.h"
-#include "crypto/openssl_util.h"
-#include "crypto/scoped_openssl_types.h"
-
-namespace crypto {
-
-// static
-RSAPrivateKey* RSAPrivateKey::Create(uint16_t num_bits) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-
-  ScopedRSA rsa_key(RSA_new());
-  ScopedBIGNUM bn(BN_new());
-  if (!rsa_key.get() || !bn.get() || !BN_set_word(bn.get(), 65537L))
-    return NULL;
-
-  if (!RSA_generate_key_ex(rsa_key.get(), num_bits, bn.get(), NULL))
-    return NULL;
-
-  scoped_ptr<RSAPrivateKey> result(new RSAPrivateKey);
-  result->key_ = EVP_PKEY_new();
-  if (!result->key_ || !EVP_PKEY_set1_RSA(result->key_, rsa_key.get()))
-    return NULL;
-
-  return result.release();
-}
-
-// static
-RSAPrivateKey* RSAPrivateKey::CreateFromPrivateKeyInfo(
-    const std::vector<uint8_t>& input) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-
-  CBS cbs;
-  CBS_init(&cbs, input.data(), input.size());
-  ScopedEVP_PKEY pkey(EVP_parse_private_key(&cbs));
-  if (!pkey || CBS_len(&cbs) != 0 || EVP_PKEY_id(pkey.get()) != EVP_PKEY_RSA)
-    return nullptr;
-
-  scoped_ptr<RSAPrivateKey> result(new RSAPrivateKey);
-  result->key_ = pkey.release();
-  return result.release();
-}
-
-// static
-RSAPrivateKey* RSAPrivateKey::CreateFromKey(EVP_PKEY* key) {
-  DCHECK(key);
-  if (EVP_PKEY_type(key->type) != EVP_PKEY_RSA)
-    return NULL;
-  RSAPrivateKey* copy = new RSAPrivateKey();
-  copy->key_ = EVP_PKEY_up_ref(key);
-  return copy;
-}
-
-RSAPrivateKey::RSAPrivateKey()
-    : key_(NULL) {
-}
-
-RSAPrivateKey::~RSAPrivateKey() {
-  if (key_)
-    EVP_PKEY_free(key_);
-}
-
-RSAPrivateKey* RSAPrivateKey::Copy() const {
-  scoped_ptr<RSAPrivateKey> copy(new RSAPrivateKey());
-  ScopedRSA rsa(EVP_PKEY_get1_RSA(key_));
-  if (!rsa)
-    return NULL;
-  copy->key_ = EVP_PKEY_new();
-  if (!EVP_PKEY_set1_RSA(copy->key_, rsa.get()))
-    return NULL;
-  return copy.release();
-}
-
-bool RSAPrivateKey::ExportPrivateKey(std::vector<uint8_t>* output) const {
-  uint8_t *der;
-  size_t der_len;
-  AutoCBB cbb;
-  if (!CBB_init(cbb.get(), 0) ||
-      !EVP_marshal_private_key(cbb.get(), key_) ||
-      !CBB_finish(cbb.get(), &der, &der_len)) {
-    return false;
-  }
-  output->assign(der, der + der_len);
-  OPENSSL_free(der);
-  return true;
-}
-
-bool RSAPrivateKey::ExportPublicKey(std::vector<uint8_t>* output) const {
-  uint8_t *der;
-  size_t der_len;
-  AutoCBB cbb;
-  if (!CBB_init(cbb.get(), 0) ||
-      !EVP_marshal_public_key(cbb.get(), key_) ||
-      !CBB_finish(cbb.get(), &der, &der_len)) {
-    return false;
-  }
-  output->assign(der, der + der_len);
-  OPENSSL_free(der);
-  return true;
-}
-
-}  // namespace crypto
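
For reference, the export/import round trip that the unit tests below exercise, sketched against the RSAPrivateKey interface used in the deleted file; error handling is minimal and the key size is arbitrary.

#include <stdint.h>

#include <memory>
#include <vector>

#include "crypto/rsa_private_key.h"

// Illustrative sketch only.
bool RoundTripPrivateKey() {
  std::unique_ptr<crypto::RSAPrivateKey> key(
      crypto::RSAPrivateKey::Create(2048));
  if (!key)
    return false;

  std::vector<uint8_t> der;
  if (!key->ExportPrivateKey(&der))
    return false;

  std::unique_ptr<crypto::RSAPrivateKey> reimported(
      crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(der));
  return reimported != nullptr;
}
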
diff --git a/crypto/rsa_private_key_unittest.cc b/crypto/rsa_private_key_unittest.cc
index 1401e3d..393a24c 100644
--- a/crypto/rsa_private_key_unittest.cc
+++ b/crypto/rsa_private_key_unittest.cc
@@ -6,7 +6,8 @@
 
 #include <stdint.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace {
@@ -71,9 +72,9 @@
 // Generate random private keys with two different sizes. Reimport, then
 // export them again. We should get back the same exact bytes.
 TEST(RSAPrivateKeyUnitTest, InitRandomTest) {
-  scoped_ptr<crypto::RSAPrivateKey> keypair1(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair1(
       crypto::RSAPrivateKey::Create(1024));
-  scoped_ptr<crypto::RSAPrivateKey> keypair2(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair2(
       crypto::RSAPrivateKey::Create(2048));
   ASSERT_TRUE(keypair1.get());
   ASSERT_TRUE(keypair2.get());
@@ -88,9 +89,9 @@
   ASSERT_TRUE(keypair1->ExportPublicKey(&pubkey1));
   ASSERT_TRUE(keypair2->ExportPublicKey(&pubkey2));
 
-  scoped_ptr<crypto::RSAPrivateKey> keypair3(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair3(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(privkey1));
-  scoped_ptr<crypto::RSAPrivateKey> keypair4(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair4(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(privkey2));
   ASSERT_TRUE(keypair3.get());
   ASSERT_TRUE(keypair4.get());
@@ -113,10 +114,10 @@
   std::vector<uint8_t> input(kTestPrivateKeyInfo,
                              kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
 
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
 
-  scoped_ptr<crypto::RSAPrivateKey> key_copy(key->Copy());
+  std::unique_ptr<crypto::RSAPrivateKey> key_copy(key->Copy());
   ASSERT_TRUE(key_copy.get());
 
   std::vector<uint8_t> privkey_copy;
@@ -131,7 +132,7 @@
                              kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
   input.push_back(0);
 
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
 
   // Import should fail.
@@ -158,7 +159,7 @@
       kTestEcPrivateKeyInfo,
       kTestEcPrivateKeyInfo + sizeof(kTestEcPrivateKeyInfo));
 
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
 
   // Import should fail as the given PKCS8 bytes were for an EC key, not an RSA key.
@@ -187,7 +188,7 @@
   std::vector<uint8_t> input(kTestPrivateKeyInfo,
                              kTestPrivateKeyInfo + sizeof(kTestPrivateKeyInfo));
 
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input));
   ASSERT_TRUE(key.get());
 
@@ -334,9 +335,9 @@
   memcpy(&input2.front(), short_integer_without_high_bit,
          sizeof(short_integer_without_high_bit));
 
-  scoped_ptr<crypto::RSAPrivateKey> keypair1(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair1(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input1));
-  scoped_ptr<crypto::RSAPrivateKey> keypair2(
+  std::unique_ptr<crypto::RSAPrivateKey> keypair2(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(input2));
   ASSERT_TRUE(keypair1.get());
   ASSERT_TRUE(keypair2.get());
@@ -355,11 +356,11 @@
 }
 
 TEST(RSAPrivateKeyUnitTest, CreateFromKeyTest) {
-  scoped_ptr<crypto::RSAPrivateKey> key_pair(
+  std::unique_ptr<crypto::RSAPrivateKey> key_pair(
       crypto::RSAPrivateKey::Create(512));
   ASSERT_TRUE(key_pair.get());
 
-  scoped_ptr<crypto::RSAPrivateKey> key_copy(
+  std::unique_ptr<crypto::RSAPrivateKey> key_copy(
       crypto::RSAPrivateKey::CreateFromKey(key_pair->key()));
   ASSERT_TRUE(key_copy.get());
 
diff --git a/crypto/scoped_nss_types.h b/crypto/scoped_nss_types.h
index 8e96e8d..a739565 100644
--- a/crypto/scoped_nss_types.h
+++ b/crypto/scoped_nss_types.h
@@ -10,7 +10,7 @@
 #include <pk11pub.h>
 #include <plarena.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
 
 namespace crypto {
 
@@ -29,29 +29,33 @@
 };
 
 // Define some convenient scopers around NSS pointers.
-typedef scoped_ptr<PK11Context,
-                   NSSDestroyer1<PK11Context, PK11_DestroyContext, PR_TRUE> >
+typedef std::unique_ptr<
+    PK11Context,
+    NSSDestroyer1<PK11Context, PK11_DestroyContext, PR_TRUE>>
     ScopedPK11Context;
-typedef scoped_ptr<PK11SlotInfo, NSSDestroyer<PK11SlotInfo, PK11_FreeSlot> >
+typedef std::unique_ptr<PK11SlotInfo, NSSDestroyer<PK11SlotInfo, PK11_FreeSlot>>
     ScopedPK11Slot;
-typedef scoped_ptr<PK11SlotList, NSSDestroyer<PK11SlotList, PK11_FreeSlotList> >
+typedef std::unique_ptr<PK11SlotList,
+                        NSSDestroyer<PK11SlotList, PK11_FreeSlotList>>
     ScopedPK11SlotList;
-typedef scoped_ptr<PK11SymKey, NSSDestroyer<PK11SymKey, PK11_FreeSymKey> >
+typedef std::unique_ptr<PK11SymKey, NSSDestroyer<PK11SymKey, PK11_FreeSymKey>>
     ScopedPK11SymKey;
-typedef scoped_ptr<SECKEYPublicKey,
-                   NSSDestroyer<SECKEYPublicKey, SECKEY_DestroyPublicKey> >
+typedef std::unique_ptr<SECKEYPublicKey,
+                        NSSDestroyer<SECKEYPublicKey, SECKEY_DestroyPublicKey>>
     ScopedSECKEYPublicKey;
-typedef scoped_ptr<SECKEYPrivateKey,
-                   NSSDestroyer<SECKEYPrivateKey, SECKEY_DestroyPrivateKey> >
+typedef std::unique_ptr<
+    SECKEYPrivateKey,
+    NSSDestroyer<SECKEYPrivateKey, SECKEY_DestroyPrivateKey>>
     ScopedSECKEYPrivateKey;
-typedef scoped_ptr<SECAlgorithmID,
-                   NSSDestroyer1<SECAlgorithmID, SECOID_DestroyAlgorithmID,
-                                 PR_TRUE> >
+typedef std::unique_ptr<
+    SECAlgorithmID,
+    NSSDestroyer1<SECAlgorithmID, SECOID_DestroyAlgorithmID, PR_TRUE>>
     ScopedSECAlgorithmID;
-typedef scoped_ptr<SECItem, NSSDestroyer1<SECItem, SECITEM_FreeItem, PR_TRUE> >
+typedef std::unique_ptr<SECItem,
+                        NSSDestroyer1<SECItem, SECITEM_FreeItem, PR_TRUE>>
     ScopedSECItem;
-typedef scoped_ptr<PLArenaPool,
-                   NSSDestroyer1<PLArenaPool, PORT_FreeArena, PR_FALSE> >
+typedef std::unique_ptr<PLArenaPool,
+                        NSSDestroyer1<PLArenaPool, PORT_FreeArena, PR_FALSE>>
     ScopedPLArenaPool;
 
 }  // namespace crypto
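
The typedef rewrite above is purely mechanical because std::unique_ptr accepts a stateless deleter functor exactly as scoped_ptr did. A generic, self-contained illustration of the pattern (not code from this tree):

#include <cstdio>
#include <memory>

struct FileCloser {
  void operator()(FILE* file) const {
    if (file)
      fclose(file);
  }
};

using ScopedFILE = std::unique_ptr<FILE, FileCloser>;

void WriteSomething() {
  ScopedFILE file(fopen("/dev/null", "w"));
  if (file)
    fputs("hello", file.get());
}  // fclose() runs via FileCloser when |file| goes out of scope.
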
diff --git a/crypto/scoped_openssl_types.h b/crypto/scoped_openssl_types.h
index 33b618d..622fed2 100644
--- a/crypto/scoped_openssl_types.h
+++ b/crypto/scoped_openssl_types.h
@@ -17,7 +17,7 @@
 #include <openssl/rsa.h>
 #include <stdint.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
 
 namespace crypto {
 
@@ -31,7 +31,7 @@
 
 template <typename PointerType, void (*Destroyer)(PointerType*)>
 using ScopedOpenSSL =
-    scoped_ptr<PointerType, OpenSSLDestroyer<PointerType, Destroyer>>;
+    std::unique_ptr<PointerType, OpenSSLDestroyer<PointerType, Destroyer>>;
 
 struct OpenSSLFree {
   void operator()(uint8_t* ptr) const { OPENSSL_free(ptr); }
@@ -55,7 +55,7 @@
 using ScopedRSA = ScopedOpenSSL<RSA, RSA_free>;
 
 // The bytes must have been allocated with OPENSSL_malloc.
-using ScopedOpenSSLBytes = scoped_ptr<uint8_t, OpenSSLFree>;
+using ScopedOpenSSLBytes = std::unique_ptr<uint8_t, OpenSSLFree>;
 
 }  // namespace crypto
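
Usage of the aliases above is unchanged by the conversion; a minimal sketch, where RSA_new()/RSA_free() are the standard BoringSSL/OpenSSL calls and error handling is elided:

#include <openssl/rsa.h>

#include "crypto/scoped_openssl_types.h"

// Illustrative sketch only.
void MakeAndDiscardRsa() {
  crypto::ScopedRSA rsa(RSA_new());
  if (!rsa)
    return;
  // ... use rsa.get() with OpenSSL APIs ...
}  // RSA_free() is invoked by the deleter when |rsa| goes out of scope.
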
 
diff --git a/crypto/scoped_test_system_nss_key_slot.h b/crypto/scoped_test_system_nss_key_slot.h
index 99a269c..eb8fbc9 100644
--- a/crypto/scoped_test_system_nss_key_slot.h
+++ b/crypto/scoped_test_system_nss_key_slot.h
@@ -5,8 +5,9 @@
 #ifndef CRYPTO_SCOPED_TEST_SYSTEM_NSS_KEY_SLOT_H_
 #define CRYPTO_SCOPED_TEST_SYSTEM_NSS_KEY_SLOT_H_
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "crypto/crypto_export.h"
 
 // Forward declaration, from <pk11pub.h>
@@ -33,7 +34,7 @@
   PK11SlotInfo* slot() const;
 
  private:
-  scoped_ptr<ScopedTestNSSDB> test_db_;
+  std::unique_ptr<ScopedTestNSSDB> test_db_;
 
   DISALLOW_COPY_AND_ASSIGN(ScopedTestSystemNSSKeySlot);
 };
diff --git a/crypto/secure_hash_openssl.cc b/crypto/secure_hash.cc
similarity index 79%
rename from crypto/secure_hash_openssl.cc
rename to crypto/secure_hash.cc
index 868300f..2bdf3d8 100644
--- a/crypto/secure_hash_openssl.cc
+++ b/crypto/secure_hash.cc
@@ -4,7 +4,11 @@
 
 #include "crypto/secure_hash.h"
 
+#if defined(OPENSSL_IS_BORINGSSL)
 #include <openssl/mem.h>
+#else
+#include <openssl/crypto.h>
+#endif
 #include <openssl/sha.h>
 #include <stddef.h>
 
@@ -16,17 +20,17 @@
 
 namespace {
 
-class SecureHashSHA256OpenSSL : public SecureHash {
+class SecureHashSHA256 : public SecureHash {
  public:
-  SecureHashSHA256OpenSSL() {
+  SecureHashSHA256() {
     SHA256_Init(&ctx_);
   }
 
-  SecureHashSHA256OpenSSL(const SecureHashSHA256OpenSSL& other) {
+  SecureHashSHA256(const SecureHashSHA256& other) : SecureHash() {
     memcpy(&ctx_, &other.ctx_, sizeof(ctx_));
   }
 
-  ~SecureHashSHA256OpenSSL() override {
+  ~SecureHashSHA256() override {
     OPENSSL_cleanse(&ctx_, sizeof(ctx_));
   }
 
@@ -41,7 +45,7 @@
   }
 
   SecureHash* Clone() const override {
-    return new SecureHashSHA256OpenSSL(*this);
+    return new SecureHashSHA256(*this);
   }
 
   size_t GetHashLength() const override { return SHA256_DIGEST_LENGTH; }
@@ -55,7 +59,7 @@
 SecureHash* SecureHash::Create(Algorithm algorithm) {
   switch (algorithm) {
     case SHA256:
-      return new SecureHashSHA256OpenSSL();
+      return new SecureHashSHA256();
     default:
       NOTIMPLEMENTED();
       return NULL;
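
A minimal sketch of the streaming interface this file implements, mirroring the unit tests further down; the inputs are placeholders and kSHA256Length comes from crypto/sha2.h.

#include <stdint.h>

#include <memory>
#include <string>

#include "crypto/secure_hash.h"
#include "crypto/sha2.h"

// Illustrative sketch only.
void HashInTwoSteps(const std::string& part1,
                    const std::string& part2,
                    uint8_t out[crypto::kSHA256Length]) {
  std::unique_ptr<crypto::SecureHash> ctx(
      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
  ctx->Update(part1.data(), part1.size());
  ctx->Update(part2.data(), part2.size());
  ctx->Finish(out, crypto::kSHA256Length);
}
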
diff --git a/crypto/secure_hash_default.cc b/crypto/secure_hash_default.cc
deleted file mode 100644
index b33010f..0000000
--- a/crypto/secure_hash_default.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/secure_hash.h"
-
-#include <stddef.h>
-
-#include "base/logging.h"
-#include "base/pickle.h"
-#include "crypto/third_party/nss/chromium-blapi.h"
-#include "crypto/third_party/nss/chromium-sha256.h"
-
-namespace crypto {
-
-namespace {
-
-class SecureHashSHA256NSS : public SecureHash {
- public:
-  SecureHashSHA256NSS() {
-    SHA256_Begin(&ctx_);
-  }
-
-  SecureHashSHA256NSS(const SecureHashSHA256NSS& other) {
-    SHA256_Clone(&ctx_, const_cast<SHA256Context*>(&other.ctx_));
-  }
-
-  ~SecureHashSHA256NSS() override { memset(&ctx_, 0, sizeof(ctx_)); }
-
-  // SecureHash implementation:
-  void Update(const void* input, size_t len) override {
-    SHA256_Update(&ctx_, static_cast<const unsigned char*>(input), len);
-  }
-
-  void Finish(void* output, size_t len) override {
-    SHA256_End(&ctx_, static_cast<unsigned char*>(output), NULL,
-               static_cast<unsigned int>(len));
-  }
-
-  SecureHash* Clone() const override { return new SecureHashSHA256NSS(*this); }
-
-  size_t GetHashLength() const override { return SHA256_LENGTH; }
-
- private:
-  SHA256Context ctx_;
-};
-
-}  // namespace
-
-SecureHash* SecureHash::Create(Algorithm algorithm) {
-  switch (algorithm) {
-    case SHA256:
-      return new SecureHashSHA256NSS();
-    default:
-      NOTIMPLEMENTED();
-      return NULL;
-  }
-}
-
-}  // namespace crypto
diff --git a/crypto/secure_hash_unittest.cc b/crypto/secure_hash_unittest.cc
index 019e86f..cb9f585 100644
--- a/crypto/secure_hash_unittest.cc
+++ b/crypto/secure_hash_unittest.cc
@@ -7,9 +7,9 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <string>
 
-#include "base/memory/scoped_ptr.h"
 #include "crypto/sha2.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -23,8 +23,8 @@
 
   uint8_t output3[crypto::kSHA256Length];
 
-  scoped_ptr<crypto::SecureHash> ctx(crypto::SecureHash::Create(
-      crypto::SecureHash::SHA256));
+  std::unique_ptr<crypto::SecureHash> ctx(
+      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
   ctx->Update(input3.data(), input3.size());
   ctx->Update(input3.data(), input3.size());
 
@@ -50,12 +50,12 @@
   uint8_t output2[crypto::kSHA256Length];
   uint8_t output3[crypto::kSHA256Length];
 
-  scoped_ptr<crypto::SecureHash> ctx1(crypto::SecureHash::Create(
-      crypto::SecureHash::SHA256));
+  std::unique_ptr<crypto::SecureHash> ctx1(
+      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
   ctx1->Update(input1.data(), input1.size());
 
-  scoped_ptr<crypto::SecureHash> ctx2(ctx1->Clone());
-  scoped_ptr<crypto::SecureHash> ctx3(ctx2->Clone());
+  std::unique_ptr<crypto::SecureHash> ctx2(ctx1->Clone());
+  std::unique_ptr<crypto::SecureHash> ctx3(ctx2->Clone());
   // At this point, ctx1, ctx2, and ctx3 are all equivalent and represent the
   // state after hashing input1.
 
@@ -76,7 +76,7 @@
 }
 
 TEST(SecureHashTest, TestLength) {
-  scoped_ptr<crypto::SecureHash> ctx(
+  std::unique_ptr<crypto::SecureHash> ctx(
       crypto::SecureHash::Create(crypto::SecureHash::SHA256));
   EXPECT_EQ(crypto::kSHA256Length, ctx->GetHashLength());
 }
diff --git a/crypto/sha2.cc b/crypto/sha2.cc
index 2646d1b..e97b8f4 100644
--- a/crypto/sha2.cc
+++ b/crypto/sha2.cc
@@ -6,14 +6,15 @@
 
 #include <stddef.h>
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "base/stl_util.h"
 #include "crypto/secure_hash.h"
 
 namespace crypto {
 
 void SHA256HashString(const base::StringPiece& str, void* output, size_t len) {
-  scoped_ptr<SecureHash> ctx(SecureHash::Create(SecureHash::SHA256));
+  std::unique_ptr<SecureHash> ctx(SecureHash::Create(SecureHash::SHA256));
   ctx->Update(str.data(), str.length());
   ctx->Finish(output, len);
 }
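
SHA256HashString() is the one-shot counterpart to the streaming SecureHash interface, for callers that already have the whole input in memory; a minimal usage sketch with a placeholder input:

#include <string>

#include "crypto/sha2.h"

// Illustrative sketch only.
std::string Sha256Of(const std::string& input) {
  std::string digest(crypto::kSHA256Length, '\0');
  crypto::SHA256HashString(input, &digest[0], digest.size());
  return digest;
}
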
diff --git a/crypto/signature_creator_openssl.cc b/crypto/signature_creator_openssl.cc
deleted file mode 100644
index d5fc4d4..0000000
--- a/crypto/signature_creator_openssl.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/signature_creator.h"
-
-#include <openssl/evp.h>
-#include <openssl/rsa.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "crypto/openssl_util.h"
-#include "crypto/rsa_private_key.h"
-#include "crypto/scoped_openssl_types.h"
-
-namespace crypto {
-
-namespace {
-
-const EVP_MD* ToOpenSSLDigest(SignatureCreator::HashAlgorithm hash_alg) {
-  switch (hash_alg) {
-    case SignatureCreator::SHA1:
-      return EVP_sha1();
-    case SignatureCreator::SHA256:
-      return EVP_sha256();
-  }
-  return NULL;
-}
-
-int ToOpenSSLDigestType(SignatureCreator::HashAlgorithm hash_alg) {
-  switch (hash_alg) {
-    case SignatureCreator::SHA1:
-      return NID_sha1;
-    case SignatureCreator::SHA256:
-      return NID_sha256;
-  }
-  return NID_undef;
-}
-
-}  // namespace
-
-// static
-SignatureCreator* SignatureCreator::Create(RSAPrivateKey* key,
-                                           HashAlgorithm hash_alg) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  scoped_ptr<SignatureCreator> result(new SignatureCreator);
-  const EVP_MD* const digest = ToOpenSSLDigest(hash_alg);
-  DCHECK(digest);
-  if (!digest) {
-    return NULL;
-  }
-  if (!EVP_DigestSignInit(result->sign_context_, NULL, digest, NULL,
-                          key->key())) {
-    return NULL;
-  }
-  return result.release();
-}
-
-// static
-bool SignatureCreator::Sign(RSAPrivateKey* key,
-                            HashAlgorithm hash_alg,
-                            const uint8_t* data,
-                            int data_len,
-                            std::vector<uint8_t>* signature) {
-  ScopedRSA rsa_key(EVP_PKEY_get1_RSA(key->key()));
-  if (!rsa_key)
-    return false;
-  signature->resize(RSA_size(rsa_key.get()));
-
-  unsigned int len = 0;
-  if (!RSA_sign(ToOpenSSLDigestType(hash_alg), data, data_len,
-                signature->data(), &len, rsa_key.get())) {
-    signature->clear();
-    return false;
-  }
-  signature->resize(len);
-  return true;
-}
-
-SignatureCreator::SignatureCreator()
-    : sign_context_(EVP_MD_CTX_create()) {
-}
-
-SignatureCreator::~SignatureCreator() {
-  EVP_MD_CTX_destroy(sign_context_);
-}
-
-bool SignatureCreator::Update(const uint8_t* data_part, int data_part_len) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  return !!EVP_DigestSignUpdate(sign_context_, data_part, data_part_len);
-}
-
-bool SignatureCreator::Final(std::vector<uint8_t>* signature) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-
-  // Determine the maximum length of the signature.
-  size_t len = 0;
-  if (!EVP_DigestSignFinal(sign_context_, NULL, &len)) {
-    signature->clear();
-    return false;
-  }
-  signature->resize(len);
-
-  // Sign it.
-  if (!EVP_DigestSignFinal(sign_context_, signature->data(), &len)) {
-    signature->clear();
-    return false;
-  }
-  signature->resize(len);
-  return true;
-}
-
-}  // namespace crypto
diff --git a/crypto/signature_creator_unittest.cc b/crypto/signature_creator_unittest.cc
index fff065e..819e663 100644
--- a/crypto/signature_creator_unittest.cc
+++ b/crypto/signature_creator_unittest.cc
@@ -2,31 +2,32 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "crypto/signature_creator.h"
+
 #include <stdint.h>
 
+#include <memory>
 #include <vector>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/sha1.h"
 #include "crypto/rsa_private_key.h"
 #include "crypto/sha2.h"
-#include "crypto/signature_creator.h"
 #include "crypto/signature_verifier.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 TEST(SignatureCreatorTest, BasicTest) {
   // Do a verify round trip.
-  scoped_ptr<crypto::RSAPrivateKey> key_original(
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
       crypto::RSAPrivateKey::Create(1024));
   ASSERT_TRUE(key_original.get());
 
   std::vector<uint8_t> key_info;
   key_original->ExportPrivateKey(&key_info);
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
   ASSERT_TRUE(key.get());
 
-  scoped_ptr<crypto::SignatureCreator> signer(
+  std::unique_ptr<crypto::SignatureCreator> signer(
       crypto::SignatureCreator::Create(key.get(),
                                        crypto::SignatureCreator::SHA1));
   ASSERT_TRUE(signer.get());
@@ -53,13 +54,13 @@
 
 TEST(SignatureCreatorTest, SignDigestTest) {
   // Do a verify round trip.
-  scoped_ptr<crypto::RSAPrivateKey> key_original(
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
       crypto::RSAPrivateKey::Create(1024));
   ASSERT_TRUE(key_original.get());
 
   std::vector<uint8_t> key_info;
   key_original->ExportPrivateKey(&key_info);
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
   ASSERT_TRUE(key.get());
 
@@ -87,13 +88,13 @@
 
 TEST(SignatureCreatorTest, SignSHA256DigestTest) {
   // Do a verify round trip.
-  scoped_ptr<crypto::RSAPrivateKey> key_original(
+  std::unique_ptr<crypto::RSAPrivateKey> key_original(
       crypto::RSAPrivateKey::Create(1024));
   ASSERT_TRUE(key_original.get());
 
   std::vector<uint8_t> key_info;
   key_original->ExportPrivateKey(&key_info);
-  scoped_ptr<crypto::RSAPrivateKey> key(
+  std::unique_ptr<crypto::RSAPrivateKey> key(
       crypto::RSAPrivateKey::CreateFromPrivateKeyInfo(key_info));
   ASSERT_TRUE(key.get());
 
diff --git a/crypto/signature_verifier_openssl.cc b/crypto/signature_verifier_openssl.cc
deleted file mode 100644
index 495abd2..0000000
--- a/crypto/signature_verifier_openssl.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/signature_verifier.h"
-
-#include <openssl/bytestring.h>
-#include <openssl/digest.h>
-#include <openssl/evp.h>
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
-#include "crypto/openssl_util.h"
-#include "crypto/scoped_openssl_types.h"
-
-namespace crypto {
-
-namespace {
-
-const EVP_MD* ToOpenSSLDigest(SignatureVerifier::HashAlgorithm hash_alg) {
-  switch (hash_alg) {
-    case SignatureVerifier::SHA1:
-      return EVP_sha1();
-    case SignatureVerifier::SHA256:
-      return EVP_sha256();
-  }
-  return NULL;
-}
-
-}  // namespace
-
-struct SignatureVerifier::VerifyContext {
-  ScopedEVP_MD_CTX ctx;
-};
-
-SignatureVerifier::SignatureVerifier()
-    : verify_context_(NULL) {
-}
-
-SignatureVerifier::~SignatureVerifier() {
-  Reset();
-}
-
-bool SignatureVerifier::VerifyInit(SignatureAlgorithm signature_algorithm,
-                                   const uint8_t* signature,
-                                   int signature_len,
-                                   const uint8_t* public_key_info,
-                                   int public_key_info_len) {
-  int pkey_type = EVP_PKEY_NONE;
-  const EVP_MD* digest = nullptr;
-  switch (signature_algorithm) {
-    case RSA_PKCS1_SHA1:
-      pkey_type = EVP_PKEY_RSA;
-      digest = EVP_sha1();
-      break;
-    case RSA_PKCS1_SHA256:
-      pkey_type = EVP_PKEY_RSA;
-      digest = EVP_sha256();
-      break;
-    case ECDSA_SHA256:
-      pkey_type = EVP_PKEY_EC;
-      digest = EVP_sha256();
-      break;
-  }
-  DCHECK_NE(EVP_PKEY_NONE, pkey_type);
-  DCHECK(digest);
-
-  return CommonInit(pkey_type, digest, signature, signature_len,
-                    public_key_info, public_key_info_len, nullptr);
-}
-
-bool SignatureVerifier::VerifyInitRSAPSS(HashAlgorithm hash_alg,
-                                         HashAlgorithm mask_hash_alg,
-                                         int salt_len,
-                                         const uint8_t* signature,
-                                         int signature_len,
-                                         const uint8_t* public_key_info,
-                                         int public_key_info_len) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  const EVP_MD* const digest = ToOpenSSLDigest(hash_alg);
-  DCHECK(digest);
-  if (!digest) {
-    return false;
-  }
-
-  EVP_PKEY_CTX* pkey_ctx;
-  if (!CommonInit(EVP_PKEY_RSA, digest, signature, signature_len,
-                  public_key_info, public_key_info_len, &pkey_ctx)) {
-    return false;
-  }
-
-  int rv = EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, RSA_PKCS1_PSS_PADDING);
-  if (rv != 1)
-    return false;
-  const EVP_MD* const mgf_digest = ToOpenSSLDigest(mask_hash_alg);
-  DCHECK(mgf_digest);
-  if (!mgf_digest) {
-    return false;
-  }
-  return EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf_digest) &&
-         EVP_PKEY_CTX_set_rsa_pss_saltlen(pkey_ctx, salt_len);
-}
-
-void SignatureVerifier::VerifyUpdate(const uint8_t* data_part,
-                                     int data_part_len) {
-  DCHECK(verify_context_);
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  int rv = EVP_DigestVerifyUpdate(verify_context_->ctx.get(),
-                                  data_part, data_part_len);
-  DCHECK_EQ(rv, 1);
-}
-
-bool SignatureVerifier::VerifyFinal() {
-  DCHECK(verify_context_);
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  int rv = EVP_DigestVerifyFinal(verify_context_->ctx.get(), signature_.data(),
-                                 signature_.size());
-  DCHECK_EQ(static_cast<int>(!!rv), rv);
-  Reset();
-  return rv == 1;
-}
-
-bool SignatureVerifier::CommonInit(int pkey_type,
-                                   const EVP_MD* digest,
-                                   const uint8_t* signature,
-                                   int signature_len,
-                                   const uint8_t* public_key_info,
-                                   int public_key_info_len,
-                                   EVP_PKEY_CTX** pkey_ctx) {
-  if (verify_context_)
-    return false;
-
-  verify_context_ = new VerifyContext;
-
-  signature_.assign(signature, signature + signature_len);
-
-  CBS cbs;
-  CBS_init(&cbs, public_key_info, public_key_info_len);
-  ScopedEVP_PKEY public_key(EVP_parse_public_key(&cbs));
-  if (!public_key || CBS_len(&cbs) != 0 ||
-      EVP_PKEY_id(public_key.get()) != pkey_type) {
-    return false;
-  }
-
-  verify_context_->ctx.reset(EVP_MD_CTX_create());
-  int rv = EVP_DigestVerifyInit(verify_context_->ctx.get(), pkey_ctx,
-                                digest, nullptr, public_key.get());
-  return rv == 1;
-}
-
-void SignatureVerifier::Reset() {
-  delete verify_context_;
-  verify_context_ = NULL;
-  signature_.clear();
-}
-
-}  // namespace crypto
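The verifier keeps its three-step shape (VerifyInit, VerifyUpdate, VerifyFinal); this deletion presumably pairs with a rename to a non-suffixed signature_verifier.cc elsewhere in the CL (not shown here). A hedged sketch of one verification round against the API of the file deleted above; VerifyRsaSha256 is an illustrative helper, not Chromium code:

    #include <stdint.h>
    #include <vector>
    #include "crypto/signature_verifier.h"

    // Sketch only: verify |signature| over |data| against a DER-encoded
    // SubjectPublicKeyInfo.
    bool VerifyRsaSha256(const std::vector<uint8_t>& data,
                         const std::vector<uint8_t>& signature,
                         const std::vector<uint8_t>& public_key_info) {
      crypto::SignatureVerifier verifier;
      if (!verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA256,
                               signature.data(),
                               static_cast<int>(signature.size()),
                               public_key_info.data(),
                               static_cast<int>(public_key_info.size()))) {
        return false;
      }
      verifier.VerifyUpdate(data.data(), static_cast<int>(data.size()));
      return verifier.VerifyFinal();
    }
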
diff --git a/crypto/symmetric_key_openssl.cc b/crypto/symmetric_key.cc
similarity index 92%
rename from crypto/symmetric_key_openssl.cc
rename to crypto/symmetric_key.cc
index 2c5358f..4da8bd8 100644
--- a/crypto/symmetric_key_openssl.cc
+++ b/crypto/symmetric_key.cc
@@ -10,9 +10,9 @@
 #include <stdint.h>
 
 #include <algorithm>
+#include <memory>
 
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_util.h"
 #include "crypto/openssl_util.h"
 
@@ -40,7 +40,7 @@
     return NULL;
 
   OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  scoped_ptr<SymmetricKey> key(new SymmetricKey);
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
   uint8_t* key_data = reinterpret_cast<uint8_t*>(
       base::WriteInto(&key->key_, key_size_in_bytes + 1));
 
@@ -71,13 +71,14 @@
     return NULL;
 
   OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  scoped_ptr<SymmetricKey> key(new SymmetricKey);
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
   uint8_t* key_data = reinterpret_cast<uint8_t*>(
       base::WriteInto(&key->key_, key_size_in_bytes + 1));
   int rv = PKCS5_PBKDF2_HMAC_SHA1(
       password.data(), password.length(),
-      reinterpret_cast<const uint8_t*>(salt.data()), salt.length(), iterations,
-      static_cast<int>(key_size_in_bytes), key_data);
+      reinterpret_cast<const uint8_t*>(salt.data()), salt.length(),
+      static_cast<unsigned>(iterations),
+      key_size_in_bytes, key_data);
   return rv == 1 ? key.release() : NULL;
 }
 
@@ -92,7 +93,7 @@
       return NULL;
   }
 
-  scoped_ptr<SymmetricKey> key(new SymmetricKey);
+  std::unique_ptr<SymmetricKey> key(new SymmetricKey);
   key->key_ = raw_key;
   return key.release();
 }
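The SymmetricKey factories still hand back owning raw pointers, so call sites wrap them in std::unique_ptr, as the updated unit test below does. A small sketch of the derive-and-export path implemented in this file; DeriveAndExport is an illustrative name and the parameters are chosen for illustration:

    #include <memory>
    #include <string>
    #include "crypto/symmetric_key.h"

    // Sketch only: PBKDF2-HMAC-SHA1 derivation (the code path above), then
    // export of the raw key bytes.
    std::string DeriveAndExport() {
      std::unique_ptr<crypto::SymmetricKey> key(
          crypto::SymmetricKey::DeriveKeyFromPassword(
              crypto::SymmetricKey::AES, "password", "somesalt",
              /*iterations=*/1024, /*key_size_in_bits=*/256));
      std::string raw_key;
      if (key)
        key->GetRawKey(&raw_key);
      return raw_key;  // 32 bytes of derived key material on success.
    }
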
diff --git a/crypto/symmetric_key_unittest.cc b/crypto/symmetric_key_unittest.cc
index ef8e7e1..7cd47cd 100644
--- a/crypto/symmetric_key_unittest.cc
+++ b/crypto/symmetric_key_unittest.cc
@@ -4,15 +4,15 @@
 
 #include "crypto/symmetric_key.h"
 
+#include <memory>
 #include <string>
 
-#include "base/memory/scoped_ptr.h"
 #include "base/strings/string_number_conversions.h"
 #include "base/strings/string_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 TEST(SymmetricKeyTest, GenerateRandomKey) {
-  scoped_ptr<crypto::SymmetricKey> key(
+  std::unique_ptr<crypto::SymmetricKey> key(
       crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
   ASSERT_TRUE(NULL != key.get());
   std::string raw_key;
@@ -21,7 +21,7 @@
 
   // Do it again and check that the keys are different.
   // (Note: this has a one-in-10^77 chance of failure!)
-  scoped_ptr<crypto::SymmetricKey> key2(
+  std::unique_ptr<crypto::SymmetricKey> key2(
       crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
   ASSERT_TRUE(NULL != key2.get());
   std::string raw_key2;
@@ -31,13 +31,13 @@
 }
 
 TEST(SymmetricKeyTest, ImportGeneratedKey) {
-  scoped_ptr<crypto::SymmetricKey> key1(
+  std::unique_ptr<crypto::SymmetricKey> key1(
       crypto::SymmetricKey::GenerateRandomKey(crypto::SymmetricKey::AES, 256));
   ASSERT_TRUE(NULL != key1.get());
   std::string raw_key1;
   EXPECT_TRUE(key1->GetRawKey(&raw_key1));
 
-  scoped_ptr<crypto::SymmetricKey> key2(
+  std::unique_ptr<crypto::SymmetricKey> key2(
       crypto::SymmetricKey::Import(crypto::SymmetricKey::AES, raw_key1));
   ASSERT_TRUE(NULL != key2.get());
 
@@ -48,14 +48,14 @@
 }
 
 TEST(SymmetricKeyTest, ImportDerivedKey) {
-  scoped_ptr<crypto::SymmetricKey> key1(
+  std::unique_ptr<crypto::SymmetricKey> key1(
       crypto::SymmetricKey::DeriveKeyFromPassword(
           crypto::SymmetricKey::HMAC_SHA1, "password", "somesalt", 1024, 160));
   ASSERT_TRUE(NULL != key1.get());
   std::string raw_key1;
   EXPECT_TRUE(key1->GetRawKey(&raw_key1));
 
-  scoped_ptr<crypto::SymmetricKey> key2(
+  std::unique_ptr<crypto::SymmetricKey> key2(
       crypto::SymmetricKey::Import(crypto::SymmetricKey::HMAC_SHA1, raw_key1));
   ASSERT_TRUE(NULL != key2.get());
 
@@ -89,10 +89,9 @@
   }
 #endif  // OS_MACOSX
 
-  scoped_ptr<crypto::SymmetricKey> key(
+  std::unique_ptr<crypto::SymmetricKey> key(
       crypto::SymmetricKey::DeriveKeyFromPassword(
-          test_data.algorithm,
-          test_data.password, test_data.salt,
+          test_data.algorithm, test_data.password, test_data.salt,
           test_data.rounds, test_data.key_size_in_bits));
   ASSERT_TRUE(NULL != key.get());
 
diff --git a/crypto/symmetric_key_win.cc b/crypto/symmetric_key_win.cc
deleted file mode 100644
index ac8e614..0000000
--- a/crypto/symmetric_key_win.cc
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "crypto/symmetric_key.h"
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <vector>
-
-// TODO(wtc): replace scoped_array by std::vector.
-#include "base/memory/scoped_ptr.h"
-#include "base/sys_byteorder.h"
-
-namespace crypto {
-
-namespace {
-
-// The following is a non-public Microsoft header documented in MSDN under
-// CryptImportKey / CryptExportKey. Following the header is the byte array of
-// the actual plaintext key.
-struct PlaintextBlobHeader {
-  BLOBHEADER hdr;
-  DWORD cbKeySize;
-};
-
-// CryptoAPI makes use of three distinct ALG_IDs for AES, rather than just
-// CALG_AES (which exists, but depending on the functions you are calling, may
-// result in function failure, whereas the subtype would succeed).
-ALG_ID GetAESAlgIDForKeySize(size_t key_size_in_bits) {
-  // Only AES-128/-192/-256 is supported in CryptoAPI.
-  switch (key_size_in_bits) {
-    case 128:
-      return CALG_AES_128;
-    case 192:
-      return CALG_AES_192;
-    case 256:
-      return CALG_AES_256;
-    default:
-      NOTREACHED();
-      return 0;
-  }
-}
-
-// Imports a raw/plaintext key of |key_size| stored in |*key_data| into a new
-// key created for the specified |provider|. |alg| contains the algorithm of
-// the key being imported.
-// If |key_data| is intended to be used as an HMAC key, then |alg| should be
-// CALG_HMAC.
-// If successful, returns true and stores the imported key in |*key|.
-// TODO(wtc): use this function in hmac_win.cc.
-bool ImportRawKey(HCRYPTPROV provider,
-                  ALG_ID alg,
-                  const void* key_data, size_t key_size,
-                  ScopedHCRYPTKEY* key) {
-  DCHECK_GT(key_size, 0u);
-
-  DWORD actual_size =
-      static_cast<DWORD>(sizeof(PlaintextBlobHeader) + key_size);
-  std::vector<BYTE> tmp_data(actual_size);
-  BYTE* actual_key = &tmp_data[0];
-  memcpy(actual_key + sizeof(PlaintextBlobHeader), key_data, key_size);
-  PlaintextBlobHeader* key_header =
-      reinterpret_cast<PlaintextBlobHeader*>(actual_key);
-  memset(key_header, 0, sizeof(PlaintextBlobHeader));
-
-  key_header->hdr.bType = PLAINTEXTKEYBLOB;
-  key_header->hdr.bVersion = CUR_BLOB_VERSION;
-  key_header->hdr.aiKeyAlg = alg;
-
-  key_header->cbKeySize = static_cast<DWORD>(key_size);
-
-  HCRYPTKEY unsafe_key = NULL;
-  DWORD flags = CRYPT_EXPORTABLE;
-  if (alg == CALG_HMAC) {
-    // Though it may appear odd that IPSEC and RC2 are being used, this is
-    // done in accordance with Microsoft's FIPS 140-2 Security Policy for the
-    // RSA Enhanced Provider, as the approved means of using arbitrary HMAC
-    // key material.
-    key_header->hdr.aiKeyAlg = CALG_RC2;
-    flags |= CRYPT_IPSEC_HMAC_KEY;
-  }
-
-  BOOL ok =
-      CryptImportKey(provider, actual_key, actual_size, 0, flags, &unsafe_key);
-
-  // Clean up the temporary copy of key, regardless of whether it was imported
-  // successfully or not.
-  SecureZeroMemory(actual_key, actual_size);
-
-  if (!ok)
-    return false;
-
-  key->reset(unsafe_key);
-  return true;
-}
-
-// Attempts to generate a random AES key of |key_size_in_bits|. Returns true
-// if generation is successful, storing the generated key in |*key| and the
-// key provider (CSP) in |*provider|.
-bool GenerateAESKey(size_t key_size_in_bits,
-                    ScopedHCRYPTPROV* provider,
-                    ScopedHCRYPTKEY* key) {
-  DCHECK(provider);
-  DCHECK(key);
-
-  ALG_ID alg = GetAESAlgIDForKeySize(key_size_in_bits);
-  if (alg == 0)
-    return false;
-
-  ScopedHCRYPTPROV safe_provider;
-  // Note: The only time NULL is safe to be passed as pszContainer is when
-  // dwFlags contains CRYPT_VERIFYCONTEXT, as all keys generated and/or used
-  // will be treated as ephemeral keys and not persisted.
-  BOOL ok = CryptAcquireContext(safe_provider.receive(), NULL, NULL,
-                                PROV_RSA_AES, CRYPT_VERIFYCONTEXT);
-  if (!ok)
-    return false;
-
-  ScopedHCRYPTKEY safe_key;
-  // In the FIPS 140-2 Security Policy for CAPI on XP/Vista+, Microsoft notes
-  // that CryptGenKey makes use of the same functionality exposed via
-  // CryptGenRandom. The reason this is being used, as opposed to
-  // CryptGenRandom and CryptImportKey is for compliance with the security
-  // policy
-  ok = CryptGenKey(safe_provider.get(), alg, CRYPT_EXPORTABLE,
-                   safe_key.receive());
-  if (!ok)
-    return false;
-
-  key->swap(safe_key);
-  provider->swap(safe_provider);
-
-  return true;
-}
-
-// Returns true if the HMAC key size meets the requirement of FIPS 198
-// Section 3.  |alg| is the hash function used in the HMAC.
-bool CheckHMACKeySize(size_t key_size_in_bits, ALG_ID alg) {
-  DWORD hash_size = 0;
-  switch (alg) {
-    case CALG_SHA1:
-      hash_size = 20;
-      break;
-    case CALG_SHA_256:
-      hash_size = 32;
-      break;
-    case CALG_SHA_384:
-      hash_size = 48;
-      break;
-    case CALG_SHA_512:
-      hash_size = 64;
-      break;
-  }
-  if (hash_size == 0)
-    return false;
-
-  // An HMAC key must be >= L/2, where L is the output size of the hash
-  // function being used.
-  return (key_size_in_bits >= (hash_size / 2 * 8) &&
-         (key_size_in_bits % 8) == 0);
-}
-
-// Attempts to generate a random, |key_size_in_bits|-long HMAC key, for use
-// with the hash function |alg|.
-// |key_size_in_bits| must be >= 1/2 the hash size of |alg| for security.
-// Returns true if generation is successful, storing the generated key in
-// |*key| and the key provider (CSP) in |*provider|.
-bool GenerateHMACKey(size_t key_size_in_bits,
-                     ALG_ID alg,
-                     ScopedHCRYPTPROV* provider,
-                     ScopedHCRYPTKEY* key,
-                     scoped_ptr<BYTE[]>* raw_key) {
-  DCHECK(provider);
-  DCHECK(key);
-  DCHECK(raw_key);
-
-  if (!CheckHMACKeySize(key_size_in_bits, alg))
-    return false;
-
-  ScopedHCRYPTPROV safe_provider;
-  // See comment in GenerateAESKey as to why NULL is acceptable for the
-  // container name.
-  BOOL ok = CryptAcquireContext(safe_provider.receive(), NULL, NULL,
-                                PROV_RSA_FULL, CRYPT_VERIFYCONTEXT);
-  if (!ok)
-    return false;
-
-  DWORD key_size_in_bytes = static_cast<DWORD>(key_size_in_bits / 8);
-  scoped_ptr<BYTE[]> random(new BYTE[key_size_in_bytes]);
-  ok = CryptGenRandom(safe_provider, key_size_in_bytes, random.get());
-  if (!ok)
-    return false;
-
-  ScopedHCRYPTKEY safe_key;
-  bool rv = ImportRawKey(safe_provider, CALG_HMAC, random.get(),
-                         key_size_in_bytes, &safe_key);
-  if (rv) {
-    key->swap(safe_key);
-    provider->swap(safe_provider);
-    raw_key->swap(random);
-  }
-
-  SecureZeroMemory(random.get(), key_size_in_bytes);
-  return rv;
-}
-
-// Attempts to create an HMAC hash instance using the specified |provider|
-// and |key|. The inner hash function will be |hash_alg|. If successful,
-// returns true and stores the hash in |*hash|.
-// TODO(wtc): use this function in hmac_win.cc.
-bool CreateHMACHash(HCRYPTPROV provider,
-                    HCRYPTKEY key,
-                    ALG_ID hash_alg,
-                    ScopedHCRYPTHASH* hash) {
-  ScopedHCRYPTHASH safe_hash;
-  BOOL ok = CryptCreateHash(provider, CALG_HMAC, key, 0, safe_hash.receive());
-  if (!ok)
-    return false;
-
-  HMAC_INFO hmac_info;
-  memset(&hmac_info, 0, sizeof(hmac_info));
-  hmac_info.HashAlgid = hash_alg;
-
-  ok = CryptSetHashParam(safe_hash, HP_HMAC_INFO,
-                         reinterpret_cast<const BYTE*>(&hmac_info), 0);
-  if (!ok)
-    return false;
-
-  hash->swap(safe_hash);
-  return true;
-}
-
-// Computes a block of the derived key using the PBKDF2 function F for the
-// specified |block_index| using the PRF |hash|, writing the output to
-// |output_buf|.
-// |output_buf| must have enough space to accomodate the output of the PRF
-// specified by |hash|.
-// Returns true if the block was successfully computed.
-bool ComputePBKDF2Block(HCRYPTHASH hash,
-                        DWORD hash_size,
-                        const std::string& salt,
-                        size_t iterations,
-                        uint32_t block_index,
-                        BYTE* output_buf) {
-  // From RFC 2898:
-  // 3. <snip> The function F is defined as the exclusive-or sum of the first
-  //    c iterates of the underlying pseudorandom function PRF applied to the
-  //    password P and the concatenation of the salt S and the block index i:
-  //      F (P, S, c, i) = U_1 \xor U_2 \xor ... \xor U_c
-  //    where
-  //      U_1 = PRF(P, S || INT (i))
-  //      U_2 = PRF(P, U_1)
-  //      ...
-  //      U_c = PRF(P, U_{c-1})
-  ScopedHCRYPTHASH safe_hash;
-  BOOL ok = CryptDuplicateHash(hash, NULL, 0, safe_hash.receive());
-  if (!ok)
-    return false;
-
-  // Iteration U_1: Compute PRF for S.
-  ok = CryptHashData(safe_hash, reinterpret_cast<const BYTE*>(salt.data()),
-                     static_cast<DWORD>(salt.size()), 0);
-  if (!ok)
-    return false;
-
-  // Iteration U_1: and append (big-endian) INT (i).
-  uint32_t big_endian_block_index = base::HostToNet32(block_index);
-  ok = CryptHashData(safe_hash,
-                     reinterpret_cast<BYTE*>(&big_endian_block_index),
-                     sizeof(big_endian_block_index), 0);
-
-  std::vector<BYTE> hash_value(hash_size);
-
-  DWORD size = hash_size;
-  ok = CryptGetHashParam(safe_hash, HP_HASHVAL, &hash_value[0], &size, 0);
-  if (!ok  || size != hash_size)
-    return false;
-
-  memcpy(output_buf, &hash_value[0], hash_size);
-
-  // Iteration 2 - c: Compute U_{iteration} by applying the PRF to
-  // U_{iteration - 1}, then xor the resultant hash with |output|, which
-  // contains U_1 ^ U_2 ^ ... ^ U_{iteration - 1}.
-  for (size_t iteration = 2; iteration <= iterations; ++iteration) {
-    safe_hash.reset();
-    ok = CryptDuplicateHash(hash, NULL, 0, safe_hash.receive());
-    if (!ok)
-      return false;
-
-    ok = CryptHashData(safe_hash, &hash_value[0], hash_size, 0);
-    if (!ok)
-      return false;
-
-    size = hash_size;
-    ok = CryptGetHashParam(safe_hash, HP_HASHVAL, &hash_value[0], &size, 0);
-    if (!ok || size != hash_size)
-      return false;
-
-    for (DWORD i = 0; i < hash_size; ++i)
-      output_buf[i] ^= hash_value[i];
-  }
-
-  return true;
-}
-
-}  // namespace
-
-SymmetricKey::~SymmetricKey() {
-  // TODO(wtc): create a "secure" string type that zeroes itself in the
-  // destructor.
-  if (!raw_key_.empty())
-    SecureZeroMemory(const_cast<char *>(raw_key_.data()), raw_key_.size());
-}
-
-// static
-SymmetricKey* SymmetricKey::GenerateRandomKey(Algorithm algorithm,
-                                              size_t key_size_in_bits) {
-  DCHECK_GE(key_size_in_bits, 8u);
-
-  ScopedHCRYPTPROV provider;
-  ScopedHCRYPTKEY key;
-
-  bool ok = false;
-  scoped_ptr<BYTE[]> raw_key;
-
-  switch (algorithm) {
-    case AES:
-      ok = GenerateAESKey(key_size_in_bits, &provider, &key);
-      break;
-    case HMAC_SHA1:
-      ok = GenerateHMACKey(key_size_in_bits, CALG_SHA1, &provider,
-                           &key, &raw_key);
-      break;
-  }
-
-  if (!ok) {
-    NOTREACHED();
-    return NULL;
-  }
-
-  size_t key_size_in_bytes = key_size_in_bits / 8;
-  if (raw_key == NULL)
-    key_size_in_bytes = 0;
-
-  SymmetricKey* result = new SymmetricKey(provider.release(),
-                                          key.release(),
-                                          raw_key.get(),
-                                          key_size_in_bytes);
-  if (raw_key != NULL)
-    SecureZeroMemory(raw_key.get(), key_size_in_bytes);
-
-  return result;
-}
-
-// static
-SymmetricKey* SymmetricKey::DeriveKeyFromPassword(Algorithm algorithm,
-                                                  const std::string& password,
-                                                  const std::string& salt,
-                                                  size_t iterations,
-                                                  size_t key_size_in_bits) {
-  // CryptoAPI lacks routines to perform PBKDF2 derivation as specified
-  // in RFC 2898, so it must be manually implemented. Only HMAC-SHA1 is
-  // supported as the PRF.
-
-  // While not used until the end, sanity-check the input before proceeding
-  // with the expensive computation.
-  DWORD provider_type = 0;
-  ALG_ID alg = 0;
-  switch (algorithm) {
-    case AES:
-      provider_type = PROV_RSA_AES;
-      alg = GetAESAlgIDForKeySize(key_size_in_bits);
-      break;
-    case HMAC_SHA1:
-      provider_type = PROV_RSA_FULL;
-      alg = CALG_HMAC;
-      break;
-    default:
-      NOTREACHED();
-      break;
-  }
-  if (provider_type == 0 || alg == 0)
-    return NULL;
-
-  ScopedHCRYPTPROV provider;
-  BOOL ok = CryptAcquireContext(provider.receive(), NULL, NULL, provider_type,
-                                CRYPT_VERIFYCONTEXT);
-  if (!ok)
-    return NULL;
-
-  // Convert the user password into a key suitable to be fed into the PRF
-  // function.
-  ScopedHCRYPTKEY password_as_key;
-  BYTE* password_as_bytes =
-      const_cast<BYTE*>(reinterpret_cast<const BYTE*>(password.data()));
-  if (!ImportRawKey(provider, CALG_HMAC, password_as_bytes,
-                    password.size(), &password_as_key))
-    return NULL;
-
-  // Configure the PRF function. Only HMAC variants are supported, with the
-  // only hash function supported being SHA1.
-  // TODO(rsleevi): Support SHA-256 on XP SP3+.
-  ScopedHCRYPTHASH prf;
-  if (!CreateHMACHash(provider, password_as_key, CALG_SHA1, &prf))
-    return NULL;
-
-  DWORD hLen = 0;
-  DWORD param_size = sizeof(hLen);
-  ok = CryptGetHashParam(prf, HP_HASHSIZE,
-                         reinterpret_cast<BYTE*>(&hLen), &param_size, 0);
-  if (!ok || hLen == 0)
-    return NULL;
-
-  // 1. If dkLen > (2^32 - 1) * hLen, output "derived key too long" and stop.
-  size_t dkLen = key_size_in_bits / 8;
-  DCHECK_GT(dkLen, 0u);
-
-  if ((dkLen / hLen) > 0xFFFFFFFF) {
-    DLOG(ERROR) << "Derived key too long.";
-    return NULL;
-  }
-
-  // 2. Let l be the number of hLen-octet blocks in the derived key,
-  //    rounding up, and let r be the number of octets in the last
-  //    block:
-  size_t L = (dkLen + hLen - 1) / hLen;
-  DCHECK_GT(L, 0u);
-
-  size_t total_generated_size = L * hLen;
-  std::vector<BYTE> generated_key(total_generated_size);
-  BYTE* block_offset = &generated_key[0];
-
-  // 3. For each block of the derived key apply the function F defined below
-  //    to the password P, the salt S, the iteration count c, and the block
-  //    index to compute the block:
-  //    T_1 = F (P, S, c, 1)
-  //    T_2 = F (P, S, c, 2)
-  //    ...
-  //    T_l = F (P, S, c, l)
-  // <snip>
-  // 4. Concatenate the blocks and extract the first dkLen octets to produce
-  //    a derived key DK:
-  //    DK = T_1 || T_2 || ... || T_l<0..r-1>
-  for (uint32_t block_index = 1; block_index <= L; ++block_index) {
-    if (!ComputePBKDF2Block(prf, hLen, salt, iterations, block_index,
-                            block_offset))
-        return NULL;
-    block_offset += hLen;
-  }
-
-  // Convert the derived key bytes into a key handle for the desired algorithm.
-  ScopedHCRYPTKEY key;
-  if (!ImportRawKey(provider, alg, &generated_key[0], dkLen, &key))
-    return NULL;
-
-  SymmetricKey* result = new SymmetricKey(provider.release(), key.release(),
-                                          &generated_key[0], dkLen);
-
-  SecureZeroMemory(&generated_key[0], total_generated_size);
-
-  return result;
-}
-
-// static
-SymmetricKey* SymmetricKey::Import(Algorithm algorithm,
-                                   const std::string& raw_key) {
-  DWORD provider_type = 0;
-  ALG_ID alg = 0;
-  switch (algorithm) {
-    case AES:
-      provider_type = PROV_RSA_AES;
-      alg = GetAESAlgIDForKeySize(raw_key.size() * 8);
-      break;
-    case HMAC_SHA1:
-      provider_type = PROV_RSA_FULL;
-      alg = CALG_HMAC;
-      break;
-    default:
-      NOTREACHED();
-      break;
-  }
-  if (provider_type == 0 || alg == 0)
-    return NULL;
-
-  ScopedHCRYPTPROV provider;
-  BOOL ok = CryptAcquireContext(provider.receive(), NULL, NULL, provider_type,
-                                CRYPT_VERIFYCONTEXT);
-  if (!ok)
-    return NULL;
-
-  ScopedHCRYPTKEY key;
-  if (!ImportRawKey(provider, alg, raw_key.data(), raw_key.size(), &key))
-    return NULL;
-
-  return new SymmetricKey(provider.release(), key.release(),
-                          raw_key.data(), raw_key.size());
-}
-
-bool SymmetricKey::GetRawKey(std::string* raw_key) {
-  // Short circuit for when the key was supplied to the constructor.
-  if (!raw_key_.empty()) {
-    *raw_key = raw_key_;
-    return true;
-  }
-
-  DWORD size = 0;
-  BOOL ok = CryptExportKey(key_, 0, PLAINTEXTKEYBLOB, 0, NULL, &size);
-  if (!ok)
-    return false;
-
-  std::vector<BYTE> result(size);
-
-  ok = CryptExportKey(key_, 0, PLAINTEXTKEYBLOB, 0, &result[0], &size);
-  if (!ok)
-    return false;
-
-  PlaintextBlobHeader* header =
-      reinterpret_cast<PlaintextBlobHeader*>(&result[0]);
-  raw_key->assign(reinterpret_cast<char*>(&result[sizeof(*header)]),
-                  header->cbKeySize);
-
-  SecureZeroMemory(&result[0], size);
-
-  return true;
-}
-
-SymmetricKey::SymmetricKey(HCRYPTPROV provider,
-                           HCRYPTKEY key,
-                           const void* key_data, size_t key_size_in_bytes)
-    : provider_(provider), key_(key) {
-  if (key_data) {
-    raw_key_.assign(reinterpret_cast<const char*>(key_data),
-                    key_size_in_bytes);
-  }
-}
-
-}  // namespace crypto
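The Windows file above hand-rolled the RFC 2898 F-function because CryptoAPI has no PBKDF2 primitive; with the Windows port dropped, the remaining derivation path is the single PKCS5_PBKDF2_HMAC_SHA1 call kept in crypto/symmetric_key.cc. For reference, a standalone sketch of that one-call equivalent against BoringSSL/OpenSSL; Pbkdf2Sha1 is an illustrative wrapper, not Chromium code:

    #include <openssl/evp.h>
    #include <stdint.h>
    #include <string>
    #include <vector>

    // Sketch only: one-call PBKDF2-HMAC-SHA1, equivalent to the
    // ComputePBKDF2Block() loop deleted above.
    std::vector<uint8_t> Pbkdf2Sha1(const std::string& password,
                                    const std::string& salt,
                                    unsigned iterations,
                                    size_t key_size_in_bytes) {
      std::vector<uint8_t> key(key_size_in_bytes);
      if (!PKCS5_PBKDF2_HMAC_SHA1(
              password.data(), password.size(),
              reinterpret_cast<const uint8_t*>(salt.data()), salt.size(),
              iterations, key.size(), key.data())) {
        key.clear();
      }
      return key;
    }
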
diff --git a/crypto/third_party/nss/LICENSE b/crypto/third_party/nss/LICENSE
deleted file mode 100644
index 0367164..0000000
--- a/crypto/third_party/nss/LICENSE
+++ /dev/null
@@ -1,35 +0,0 @@
-/* ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1994-2000
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
diff --git a/crypto/third_party/nss/README.chromium b/crypto/third_party/nss/README.chromium
deleted file mode 100644
index 1a63665..0000000
--- a/crypto/third_party/nss/README.chromium
+++ /dev/null
@@ -1,18 +0,0 @@
-Name: Network Security Services (NSS)
-URL: http://www.mozilla.org/projects/security/pki/nss/
-License: MPL 1.1/GPL 2.0/LGPL 2.1
-
-We extracted the SHA-256 source files, eliminated unneeded dependencies,
-deleted or commented out unused code, and tweaked them for Chrome's source
-tree.  sha512.c is renamed sha512.cc so that it can include Chrome's C++
-header "base/basictypes.h".  We define NOUNROLL256 to reduce the object code
-size.
-
-In blapi.h and sha512.cc, replaced uint32 by unsigned int so that they can
-be compiled with -DNO_NSPR_10_SUPPORT.  NO_NSPR_10_SUPPORT turns off the
-definition of the NSPR 1.0 types int8 - int64 and uint8 - uint64 to avoid
-conflict with the same-named types defined in "base/basictypes.h".
-
-rsawrapr.c is copied from nss/lib/softoken/rsawrapr.c, with
-HASH_GetRawHashObject changed to HASH_GetHashObject. It contains the
-emsa_pss_verify function for verifying RSA-PSS signatures.
diff --git a/crypto/third_party/nss/pk11akey.cc b/crypto/third_party/nss/pk11akey.cc
deleted file mode 100644
index 4db582f..0000000
--- a/crypto/third_party/nss/pk11akey.cc
+++ /dev/null
@@ -1,98 +0,0 @@
- /* ***** BEGIN LICENSE BLOCK *****
-  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
-  *
-  * The contents of this file are subject to the Mozilla Public License Version
-  * 1.1 (the "License"); you may not use this file except in compliance with
-  * the License. You may obtain a copy of the License at
-  * http://www.mozilla.org/MPL/
-  *
-  * Software distributed under the License is distributed on an "AS IS" basis,
-  * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-  * for the specific language governing rights and limitations under the
-  * License.
-  *
-  * The Original Code is the Netscape security libraries.
-  *
-  * The Initial Developer of the Original Code is
-  * Netscape Communications Corporation.
-  * Portions created by the Initial Developer are Copyright (C) 1994-2000
-  * the Initial Developer. All Rights Reserved.
-  *
-  * Contributor(s):
-  *   Dr Stephen Henson <stephen.henson@gemplus.com>
-  *   Dr Vipul Gupta <vipul.gupta@sun.com>, and
-  *   Douglas Stebila <douglas@stebila.ca>, Sun Microsystems Laboratories
-  *
-  * Alternatively, the contents of this file may be used under the terms of
-  * either the GNU General Public License Version 2 or later (the "GPL"), or
-  * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-  * in which case the provisions of the GPL or the LGPL are applicable instead
-  * of those above. If you wish to allow use of your version of this file only
-  * under the terms of either the GPL or the LGPL, and not to allow others to
-  * use your version of this file under the terms of the MPL, indicate your
-  * decision by deleting the provisions above and replace them with the notice
-  * and other provisions required by the GPL or the LGPL. If you do not delete
-  * the provisions above, a recipient may use your version of this file under
-  * the terms of any one of the MPL, the GPL or the LGPL.
-  *
-  * ***** END LICENSE BLOCK ***** */
-
-#include "crypto/third_party/nss/chromium-nss.h"
-
-#include <pk11pub.h>
-
-#include "base/logging.h"
-
-// Based on PK11_ImportEncryptedPrivateKeyInfo function in
-// mozilla/security/nss/lib/pk11wrap/pk11akey.c.
-SECStatus ImportEncryptedECPrivateKeyInfoAndReturnKey(
-    PK11SlotInfo* slot,
-    SECKEYEncryptedPrivateKeyInfo* epki,
-    SECItem* password,
-    SECItem* nickname,
-    SECItem* public_value,
-    PRBool permanent,
-    PRBool sensitive,
-    SECKEYPrivateKey** private_key,
-    void* wincx) {
-  SECItem* crypto_param = NULL;
-
-  CK_ATTRIBUTE_TYPE usage = CKA_SIGN;
-
-  PK11SymKey* key = PK11_PBEKeyGen(slot,
-                                   &epki->algorithm,
-                                   password,
-                                   PR_FALSE,  // faulty3DES
-                                   wincx);
-  if (key == NULL) {
-    DLOG(ERROR) << "PK11_PBEKeyGen: " << PORT_GetError();
-    return SECFailure;
-  }
-
-  CK_MECHANISM_TYPE crypto_mech_type = PK11_GetPBECryptoMechanism(
-      &epki->algorithm, &crypto_param, password);
-  if (crypto_mech_type == CKM_INVALID_MECHANISM) {
-    DLOG(ERROR) << "PK11_GetPBECryptoMechanism: " << PORT_GetError();
-    PK11_FreeSymKey(key);
-    return SECFailure;
-  }
-
-  crypto_mech_type = PK11_GetPadMechanism(crypto_mech_type);
-
-  *private_key = PK11_UnwrapPrivKey(slot, key, crypto_mech_type, crypto_param,
-                                    &epki->encryptedData, nickname,
-                                    public_value, permanent, sensitive, CKK_EC,
-                                    &usage, 1, wincx);
-
-  if (crypto_param != NULL)
-    SECITEM_ZfreeItem(crypto_param, PR_TRUE);
-
-  PK11_FreeSymKey(key);
-
-  if (!*private_key) {
-    DLOG(ERROR) << "PK11_UnwrapPrivKey: " << PORT_GetError();
-    return SECFailure;
-  }
-
-  return SECSuccess;
-}
diff --git a/crypto/third_party/nss/secsign.cc b/crypto/third_party/nss/secsign.cc
deleted file mode 100644
index c9816fb..0000000
--- a/crypto/third_party/nss/secsign.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Signature stuff.
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is the Netscape security libraries.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1994-2000
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- *   Dr Vipul Gupta <vipul.gupta@sun.com>, Sun Microsystems Laboratories
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-#include "crypto/third_party/nss/chromium-nss.h"
-
-#include <vector>
-
-#include <cryptohi.h>
-#include <pk11pub.h>
-#include <secerr.h>
-#include <sechash.h>
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "build/build_config.h"
-
-SECStatus DerSignData(PLArenaPool *arena,
-                      SECItem *result,
-                      SECItem *input,
-                      SECKEYPrivateKey *key,
-                      SECOidTag algo_id) {
-  if (key->keyType != ecKey) {
-    return SEC_DerSignData(arena, result, input->data, input->len, key,
-                           algo_id);
-  }
-
-  // NSS has a private function sec_DecodeSigAlg it uses to figure out the
-  // correct hash from the algorithm id.
-  HASH_HashType hash_type;
-  switch (algo_id) {
-    case SEC_OID_ANSIX962_ECDSA_SHA1_SIGNATURE:
-      hash_type = HASH_AlgSHA1;
-      break;
-#ifdef SHA224_LENGTH
-    case SEC_OID_ANSIX962_ECDSA_SHA224_SIGNATURE:
-      hash_type = HASH_AlgSHA224;
-      break;
-#endif
-    case SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE:
-      hash_type = HASH_AlgSHA256;
-      break;
-    case SEC_OID_ANSIX962_ECDSA_SHA384_SIGNATURE:
-      hash_type = HASH_AlgSHA384;
-      break;
-    case SEC_OID_ANSIX962_ECDSA_SHA512_SIGNATURE:
-      hash_type = HASH_AlgSHA512;
-      break;
-    default:
-      PORT_SetError(SEC_ERROR_INVALID_ALGORITHM);
-      return SECFailure;
-  }
-
-  // Hash the input.
-  std::vector<uint8_t> hash_data(HASH_ResultLen(hash_type));
-  SECStatus rv = HASH_HashBuf(
-      hash_type, &hash_data[0], input->data, input->len);
-  if (rv != SECSuccess)
-    return rv;
-  SECItem hash = {siBuffer, &hash_data[0], 
-		  static_cast<unsigned int>(hash_data.size())};
-
-  // Compute signature of hash.
-  int signature_len = PK11_SignatureLen(key);
-  std::vector<uint8_t> signature_data(signature_len);
-  SECItem sig = {siBuffer, &signature_data[0], 
-		 static_cast<unsigned int>(signature_len)};
-  rv = PK11_Sign(key, &sig, &hash);
-  if (rv != SECSuccess)
-    return rv;
-
-  CERTSignedData sd;
-  PORT_Memset(&sd, 0, sizeof(sd));
-  // Fill in tbsCertificate.
-  sd.data.data = (unsigned char*) input->data;
-  sd.data.len = input->len;
-
-  // Fill in signatureAlgorithm.
-  rv = SECOID_SetAlgorithmID(arena, &sd.signatureAlgorithm, algo_id, 0);
-  if (rv != SECSuccess)
-    return rv;
-
-  // Fill in signatureValue.
-  rv = DSAU_EncodeDerSigWithLen(&sd.signature, &sig, sig.len);
-  if (rv != SECSuccess)
-    return rv;
-  sd.signature.len <<=  3;  // Convert to bit string.
-
-  // DER encode the signed data object.
-  void* encode_result = SEC_ASN1EncodeItem(
-      arena, result, &sd, SEC_ASN1_GET(CERT_SignedDataTemplate));
-
-  PORT_Free(sd.signature.data);
-
-  return encode_result ? SECSuccess : SECFailure;
-}
diff --git a/dbus/BUILD.gn b/dbus/BUILD.gn
index dbeee0c..1502ca7 100644
--- a/dbus/BUILD.gn
+++ b/dbus/BUILD.gn
@@ -45,7 +45,7 @@
     "//base",
   ]
 
-  public_configs = [ "//build/config/linux:dbus" ]
+  public_configs = [ "//build/config/linux/dbus" ]
 }
 
 proto_library("test_proto") {
@@ -76,7 +76,7 @@
     "//testing/gmock",
   ]
 
-  configs += [ "//build/config/linux:dbus" ]
+  configs += [ "//build/config/linux/dbus" ]
 }
 
 test("dbus_unittests") {
@@ -109,7 +109,7 @@
     "//third_party/protobuf:protobuf_lite",
   ]
 
-  configs += [ "//build/config/linux:dbus" ]
+  configs += [ "//build/config/linux/dbus" ]
 }
 
 executable("dbus_test_server") {
@@ -127,5 +127,5 @@
     "//build/config/sanitizers:deps",
   ]
 
-  configs += [ "//build/config/linux:dbus" ]
+  configs += [ "//build/config/linux/dbus" ]
 }
diff --git a/dbus/bus.cc b/dbus/bus.cc
index 8781eae..bb5afb1 100644
--- a/dbus/bus.cc
+++ b/dbus/bus.cc
@@ -78,13 +78,13 @@
 
  private:
   // Implement MessagePumpLibevent::Watcher.
-  void OnFileCanReadWithoutBlocking(int /* file_descriptor */) override {
+  void OnFileCanReadWithoutBlocking(int /*file_descriptor*/) override {
     const bool success = dbus_watch_handle(raw_watch_, DBUS_WATCH_READABLE);
     CHECK(success) << "Unable to allocate memory";
   }
 
   // Implement MessagePumpLibevent::Watcher.
-  void OnFileCanWriteWithoutBlocking(int /* file_descriptor */) override {
+  void OnFileCanWriteWithoutBlocking(int /*file_descriptor*/) override {
     const bool success = dbus_watch_handle(raw_watch_, DBUS_WATCH_WRITABLE);
     CHECK(success) << "Unable to allocate memory";
   }
@@ -878,7 +878,8 @@
     return "";
   }
 
-  scoped_ptr<Response> response(Response::FromRawMessage(response_message));
+  std::unique_ptr<Response> response(
+      Response::FromRawMessage(response_message));
   MessageReader reader(response.get());
 
   std::string service_owner;
@@ -1081,7 +1082,7 @@
 }
 
 void Bus::OnDispatchStatusChanged(DBusConnection* connection,
-                                  DBusDispatchStatus /* status */) {
+                                  DBusDispatchStatus /*status*/) {
   DCHECK_EQ(connection, connection_);
   AssertOnDBusThread();
 
@@ -1101,7 +1102,7 @@
   // |message| will be unrefed on exit of the function. Increment the
   // reference so we can use it in Signal::FromRawMessage() below.
   dbus_message_ref(message);
-  scoped_ptr<Signal> signal(Signal::FromRawMessage(message));
+  std::unique_ptr<Signal> signal(Signal::FromRawMessage(message));
 
   // Confirm the validity of the NameOwnerChanged signal.
   if (signal->GetMember() != kNameOwnerChangedSignal ||
@@ -1178,7 +1179,7 @@
 
 // static
 DBusHandlerResult Bus::OnServiceOwnerChangedFilter(
-    DBusConnection* /* connection */,
+    DBusConnection* /*connection*/,
     DBusMessage* message,
     void* data) {
   if (dbus_message_is_signal(message,
diff --git a/dbus/bus.h b/dbus/bus.h
index e5e0b1c..7d39159 100644
--- a/dbus/bus.h
+++ b/dbus/bus.h
@@ -88,7 +88,7 @@
 //       bus.GetObjectProxy(service_name, object_path);
 //
 //   dbus::MethodCall method_call(interface_name, method_name);
-//   scoped_ptr<dbus::Response> response(
+//   std::unique_ptr<dbus::Response> response(
 //       object_proxy.CallMethodAndBlock(&method_call, timeout_ms));
 //   if (response.get() != NULL) {  // Success.
 //     ...
diff --git a/dbus/dbus_statistics.cc b/dbus/dbus_statistics.cc
index e6eb5a2..e1e0973 100644
--- a/dbus/dbus_statistics.cc
+++ b/dbus/dbus_statistics.cc
@@ -4,11 +4,11 @@
 
 #include "dbus/dbus_statistics.h"
 
+#include <memory>
 #include <set>
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/threading/platform_thread.h"
@@ -108,7 +108,7 @@
                 const std::string& method,
                 bool add_stat) {
     DCHECK_EQ(origin_thread_id_, base::PlatformThread::CurrentId());
-    scoped_ptr<Stat> stat(new Stat(service, interface, method));
+    std::unique_ptr<Stat> stat(new Stat(service, interface, method));
     StatSet::iterator found = stats_.find(stat.get());
     if (found != stats_.end())
       return *found;
diff --git a/dbus/exported_object.cc b/dbus/exported_object.cc
index 889792a..b156308 100644
--- a/dbus/exported_object.cc
+++ b/dbus/exported_object.cc
@@ -169,8 +169,7 @@
 
   ScopedDBusError error;
 
-  DBusObjectPathVTable vtable;
-  memset(&vtable, 0, sizeof(vtable));
+  DBusObjectPathVTable vtable = {};
   vtable.message_function = &ExportedObject::HandleMessageThunk;
   vtable.unregister_function = &ExportedObject::OnUnregisteredThunk;
   const bool success = bus_->TryRegisterObjectPath(object_path_,
@@ -187,16 +186,15 @@
   return true;
 }
 
-DBusHandlerResult ExportedObject::HandleMessage(
-    DBusConnection* /* connection */,
-    DBusMessage* raw_message) {
+DBusHandlerResult ExportedObject::HandleMessage(DBusConnection*,
+                                                DBusMessage* raw_message) {
   bus_->AssertOnDBusThread();
   DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_CALL, dbus_message_get_type(raw_message));
 
   // raw_message will be unrefed on exit of the function. Increment the
   // reference so we can use it in MethodCall.
   dbus_message_ref(raw_message);
-  scoped_ptr<MethodCall> method_call(
+  std::unique_ptr<MethodCall> method_call(
       MethodCall::FromRawMessage(raw_message));
   const std::string interface = method_call->GetInterface();
   const std::string member = method_call->GetMember();
@@ -242,7 +240,7 @@
 }
 
 void ExportedObject::RunMethod(MethodCallCallback method_call_callback,
-                               scoped_ptr<MethodCall> method_call,
+                               std::unique_ptr<MethodCall> method_call,
                                base::TimeTicks start_time) {
   bus_->AssertOnOriginThread();
   MethodCall* method = method_call.get();
@@ -254,8 +252,8 @@
 }
 
 void ExportedObject::SendResponse(base::TimeTicks start_time,
-                                  scoped_ptr<MethodCall> method_call,
-                                  scoped_ptr<Response> response) {
+                                  std::unique_ptr<MethodCall> method_call,
+                                  std::unique_ptr<Response> response) {
   DCHECK(method_call);
   if (bus_->HasDBusThread()) {
     bus_->GetDBusTaskRunner()->PostTask(
@@ -270,8 +268,8 @@
   }
 }
 
-void ExportedObject::OnMethodCompleted(scoped_ptr<MethodCall> method_call,
-                                       scoped_ptr<Response> response,
+void ExportedObject::OnMethodCompleted(std::unique_ptr<MethodCall> method_call,
+                                       std::unique_ptr<Response> response,
                                        base::TimeTicks start_time) {
   bus_->AssertOnDBusThread();
 
@@ -287,11 +285,9 @@
 
   if (!response) {
     // Something bad happened in the method call.
-    scoped_ptr<ErrorResponse> error_response(
-        ErrorResponse::FromMethodCall(
-            method_call.get(),
-            DBUS_ERROR_FAILED,
-            "error occurred in " + method_call->GetMember()));
+    std::unique_ptr<ErrorResponse> error_response(ErrorResponse::FromMethodCall(
+        method_call.get(), DBUS_ERROR_FAILED,
+        "error occurred in " + method_call->GetMember()));
     bus_->Send(error_response->raw_message(), NULL);
     return;
   }
@@ -304,8 +300,7 @@
                       base::TimeTicks::Now() - start_time);
 }
 
-void ExportedObject::OnUnregistered(DBusConnection* /* connection */) {
-}
+void ExportedObject::OnUnregistered(DBusConnection*) {}
 
 DBusHandlerResult ExportedObject::HandleMessageThunk(
     DBusConnection* connection,
diff --git a/dbus/exported_object.h b/dbus/exported_object.h
index 89de096..69a63a5 100644
--- a/dbus/exported_object.h
+++ b/dbus/exported_object.h
@@ -8,6 +8,7 @@
 #include <dbus/dbus.h>
 
 #include <map>
+#include <memory>
 #include <string>
 #include <utility>
 
@@ -41,7 +42,8 @@
   // Called to send a response from an exported method. |response| is the
   // response message. Callers should pass NULL in the event of an error that
   // prevents the sending of a response.
-  typedef base::Callback<void (scoped_ptr<Response> response)> ResponseSender;
+  typedef base::Callback<void(std::unique_ptr<Response> response)>
+      ResponseSender;
 
   // Called when an exported method is called. |method_call| is the request
   // message. |sender| is the callback that's used to send a response.
@@ -138,20 +140,20 @@
 
   // Runs the method. Helper function for HandleMessage().
   void RunMethod(MethodCallCallback method_call_callback,
-                 scoped_ptr<MethodCall> method_call,
+                 std::unique_ptr<MethodCall> method_call,
                  base::TimeTicks start_time);
 
   // Callback invoked by service provider to send a response to a method call.
   // Can be called immediately from a MethodCallCallback to implement a
   // synchronous service or called later to implement an asynchronous service.
   void SendResponse(base::TimeTicks start_time,
-                    scoped_ptr<MethodCall> method_call,
-                    scoped_ptr<Response> response);
+                    std::unique_ptr<MethodCall> method_call,
+                    std::unique_ptr<Response> response);
 
   // Called on completion of the method run from SendResponse().
   // Takes ownership of |method_call| and |response|.
-  void OnMethodCompleted(scoped_ptr<MethodCall> method_call,
-                         scoped_ptr<Response> response,
+  void OnMethodCompleted(std::unique_ptr<MethodCall> method_call,
+                         std::unique_ptr<Response> response,
                          base::TimeTicks start_time);
 
   // Called when the object is unregistered.
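With ResponseSender now carrying std::unique_ptr<Response>, an exported method implementation moves its response into the sender. A rough sketch, assuming MethodCallCallback keeps its usual (MethodCall*, ResponseSender) shape, which this hunk does not show; OnEcho is an illustrative handler, not Chromium code:

    #include <memory>
    #include <string>
    #include <utility>
    #include "dbus/exported_object.h"
    #include "dbus/message.h"

    // Sketch only: reply to a method call with its single string argument
    // echoed back, or with a D-Bus error if the argument is missing.
    void OnEcho(dbus::MethodCall* method_call,
                dbus::ExportedObject::ResponseSender sender) {
      dbus::MessageReader reader(method_call);
      std::string text;
      if (!reader.PopString(&text)) {
        sender.Run(dbus::ErrorResponse::FromMethodCall(
            method_call, DBUS_ERROR_INVALID_ARGS, "expected a string"));
        return;
      }
      std::unique_ptr<dbus::Response> response =
          dbus::Response::FromMethodCall(method_call);
      dbus::MessageWriter writer(response.get());
      writer.AppendString(text);
      sender.Run(std::move(response));
    }
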
diff --git a/dbus/file_descriptor.h b/dbus/file_descriptor.h
index b4f95cb..8fcab2f 100644
--- a/dbus/file_descriptor.h
+++ b/dbus/file_descriptor.h
@@ -5,7 +5,8 @@
 #ifndef DBUS_FILE_DESCRIPTOR_H_
 #define DBUS_FILE_DESCRIPTOR_H_
 
-#include "base/memory/scoped_ptr.h"
+#include <memory>
+
 #include "base/move.h"
 #include "dbus/dbus_export.h"
 
@@ -84,7 +85,7 @@
 };
 
 using ScopedFileDescriptor =
-    scoped_ptr<FileDescriptor, FileDescriptor::Deleter>;
+    std::unique_ptr<FileDescriptor, FileDescriptor::Deleter>;
 
 }  // namespace dbus
 
diff --git a/dbus/message.cc b/dbus/message.cc
index 8a58dba..4a84756 100644
--- a/dbus/message.cc
+++ b/dbus/message.cc
@@ -398,23 +398,23 @@
 Response::Response() : Message() {
 }
 
-scoped_ptr<Response> Response::FromRawMessage(DBusMessage* raw_message) {
+std::unique_ptr<Response> Response::FromRawMessage(DBusMessage* raw_message) {
   DCHECK_EQ(DBUS_MESSAGE_TYPE_METHOD_RETURN,
             dbus_message_get_type(raw_message));
 
-  scoped_ptr<Response> response(new Response);
+  std::unique_ptr<Response> response(new Response);
   response->Init(raw_message);
   return response;
 }
 
-scoped_ptr<Response> Response::FromMethodCall(MethodCall* method_call) {
-  scoped_ptr<Response> response(new Response);
+std::unique_ptr<Response> Response::FromMethodCall(MethodCall* method_call) {
+  std::unique_ptr<Response> response(new Response);
   response->Init(dbus_message_new_method_return(method_call->raw_message()));
   return response;
 }
 
-scoped_ptr<Response> Response::CreateEmpty() {
-  scoped_ptr<Response> response(new Response);
+std::unique_ptr<Response> Response::CreateEmpty() {
+  std::unique_ptr<Response> response(new Response);
   response->Init(dbus_message_new(DBUS_MESSAGE_TYPE_METHOD_RETURN));
   return response;
 }
@@ -426,20 +426,20 @@
 ErrorResponse::ErrorResponse() : Response() {
 }
 
-scoped_ptr<ErrorResponse> ErrorResponse::FromRawMessage(
+std::unique_ptr<ErrorResponse> ErrorResponse::FromRawMessage(
     DBusMessage* raw_message) {
   DCHECK_EQ(DBUS_MESSAGE_TYPE_ERROR, dbus_message_get_type(raw_message));
 
-  scoped_ptr<ErrorResponse> response(new ErrorResponse);
+  std::unique_ptr<ErrorResponse> response(new ErrorResponse);
   response->Init(raw_message);
   return response;
 }
 
-scoped_ptr<ErrorResponse> ErrorResponse::FromMethodCall(
+std::unique_ptr<ErrorResponse> ErrorResponse::FromMethodCall(
     MethodCall* method_call,
     const std::string& error_name,
     const std::string& error_message) {
-  scoped_ptr<ErrorResponse> response(new ErrorResponse);
+  std::unique_ptr<ErrorResponse> response(new ErrorResponse);
   response->Init(dbus_message_new_error(method_call->raw_message(),
                                         error_name.c_str(),
                                         error_message.c_str()));
@@ -599,6 +599,19 @@
   CloseContainer(&array_writer);
 }
 
+void MessageWriter::AppendArrayOfDoubles(const double* values, size_t length) {
+  DCHECK(!container_is_open_);
+  MessageWriter array_writer(message_);
+  OpenArray("d", &array_writer);
+  const bool success = dbus_message_iter_append_fixed_array(
+      &(array_writer.raw_message_iter_),
+      DBUS_TYPE_DOUBLE,
+      &values,
+      static_cast<int>(length));
+  CHECK(success) << "Unable to allocate memory";
+  CloseContainer(&array_writer);
+}
+
 void MessageWriter::AppendArrayOfStrings(
     const std::vector<std::string>& strings) {
   DCHECK(!container_is_open_);
@@ -822,7 +835,26 @@
   dbus_message_iter_get_fixed_array(&array_reader.raw_message_iter_,
                                     bytes,
                                     &int_length);
-  *length = static_cast<int>(int_length);
+  *length = static_cast<size_t>(int_length);
+  return true;
+}
+
+bool MessageReader::PopArrayOfDoubles(const double** doubles, size_t* length) {
+  MessageReader array_reader(message_);
+  if (!PopArray(&array_reader))
+    return false;
+  if (!array_reader.HasMoreData()) {
+    *length = 0;
+    *doubles = nullptr;
+    return true;
+  }
+  if (!array_reader.CheckDataType(DBUS_TYPE_DOUBLE))
+    return false;
+  int int_length = 0;
+  dbus_message_iter_get_fixed_array(&array_reader.raw_message_iter_,
+                                    doubles,
+                                    &int_length);
+  *length = static_cast<size_t>(int_length);
   return true;
 }
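The new double-array helpers mirror the byte-array ones: they operate on fixed arrays, so the reader returns a pointer into the message rather than a copy (used for the audio mixer matrix, per the message.h comment below). A short round-trip sketch; RoundTripDoubles is an illustrative name:

    #include <stddef.h>
    #include <memory>
    #include "dbus/message.h"

    // Sketch only: append an array of doubles, then pop it back out.
    void RoundTripDoubles() {
      std::unique_ptr<dbus::Response> message = dbus::Response::CreateEmpty();

      dbus::MessageWriter writer(message.get());
      const double values[] = {0.5, 0.25, 1.0};
      writer.AppendArrayOfDoubles(values, 3);

      dbus::MessageReader reader(message.get());
      const double* read_values = nullptr;
      size_t length = 0;
      if (reader.PopArrayOfDoubles(&read_values, &length)) {
        // |read_values| points into |message|; copy the data if it must
        // outlive the message.
      }
    }
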
 
diff --git a/dbus/message.h b/dbus/message.h
index 7dffe0e..0aa010c 100644
--- a/dbus/message.h
+++ b/dbus/message.h
@@ -8,11 +8,12 @@
 #include <dbus/dbus.h>
 #include <stddef.h>
 #include <stdint.h>
+
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "dbus/dbus_export.h"
 #include "dbus/file_descriptor.h"
 #include "dbus/object_path.h"
@@ -204,16 +205,16 @@
  public:
   // Returns a newly created Response from the given raw message of the
   // type DBUS_MESSAGE_TYPE_METHOD_RETURN. Takes the ownership of |raw_message|.
-  static scoped_ptr<Response> FromRawMessage(DBusMessage* raw_message);
+  static std::unique_ptr<Response> FromRawMessage(DBusMessage* raw_message);
 
   // Returns a newly created Response from the given method call.
   // Used for implementing exported methods. Does NOT take the ownership of
   // |method_call|.
-  static scoped_ptr<Response> FromMethodCall(MethodCall* method_call);
+  static std::unique_ptr<Response> FromMethodCall(MethodCall* method_call);
 
   // Returns a newly created Response with an empty payload.
   // Useful for testing.
-  static scoped_ptr<Response> CreateEmpty();
+  static std::unique_ptr<Response> CreateEmpty();
 
  protected:
   // Creates a Response message. The internal raw message is NULL.
@@ -229,13 +230,14 @@
  public:
   // Returns a newly created Response from the given raw message of the
   // type DBUS_MESSAGE_TYPE_METHOD_RETURN. Takes the ownership of |raw_message|.
-  static scoped_ptr<ErrorResponse> FromRawMessage(DBusMessage* raw_message);
+  static std::unique_ptr<ErrorResponse> FromRawMessage(
+      DBusMessage* raw_message);
 
   // Returns a newly created ErrorResponse from the given method call, the
   // error name, and the error message.  The error name looks like
   // "org.freedesktop.DBus.Error.Failed". Used for returning an error to a
   // failed method call. Does NOT take the ownership of |method_call|.
-  static scoped_ptr<ErrorResponse> FromMethodCall(
+  static std::unique_ptr<ErrorResponse> FromMethodCall(
       MethodCall* method_call,
       const std::string& error_name,
       const std::string& error_message);
@@ -312,6 +314,9 @@
   // function.
   void AppendArrayOfBytes(const uint8_t* values, size_t length);
 
+  // Appends the array of doubles. Used for audio mixer matrix doubles.
+  void AppendArrayOfDoubles(const double* values, size_t length);
+
   // Appends the array of strings. Arrays of strings are often used for
   // exchanging lists of names, hence it's worth having a specialized
   // function.
@@ -415,6 +420,9 @@
   // after the MessageReader is destroyed.
   bool PopArrayOfBytes(const uint8_t** bytes, size_t* length);
 
+  // Gets the array of doubles at the current iterator position.
+  bool PopArrayOfDoubles(const double** doubles, size_t* length);
+
   // Gets the array of strings at the current iterator position. |strings| is
   // cleared before being modified. Returns true and advances the iterator on
   // success.
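
The new fixed-array helpers follow the existing byte-array pattern. A minimal round-trip sketch, assuming only the library as patched above (the method call and gain values are purely illustrative):

#include <memory>
#include <vector>

#include "dbus/message.h"

// Sketch: write a row of mixer gains as a fixed array of doubles, then read
// it back. The values and the calling method are hypothetical.
std::unique_ptr<dbus::Response> BuildGainsResponse(dbus::MethodCall* call) {
  std::unique_ptr<dbus::Response> response =
      dbus::Response::FromMethodCall(call);
  const double gains[] = {0.0, 0.5, 1.0};
  dbus::MessageWriter writer(response.get());
  writer.AppendArrayOfDoubles(gains, sizeof(gains) / sizeof(gains[0]));
  return response;
}

bool ReadGains(dbus::Response* response, std::vector<double>* out) {
  dbus::MessageReader reader(response);
  const double* doubles = nullptr;
  size_t length = 0;
  if (!reader.PopArrayOfDoubles(&doubles, &length))
    return false;
  // |doubles| aliases the message buffer, so copy before |response| dies.
  out->assign(doubles, doubles + length);
  return true;
}
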
diff --git a/dbus/mock_object_proxy.h b/dbus/mock_object_proxy.h
index 66f485a..f27f6f6 100644
--- a/dbus/mock_object_proxy.h
+++ b/dbus/mock_object_proxy.h
@@ -21,27 +21,28 @@
                   const std::string& service_name,
                   const ObjectPath& object_path);
 
-  // GMock doesn't support the return type of scoped_ptr<> because scoped_ptr is
-  // uncopyable. This is a workaround which defines |MockCallMethodAndBlock| as
-  // a mock method and makes |CallMethodAndBlock| call the mocked method.
-  // Use |MockCallMethodAndBlock| for setting/testing expectations.
+  // GMock doesn't support the return type of std::unique_ptr<> because
+  // std::unique_ptr is uncopyable. This is a workaround which defines
+  // |MockCallMethodAndBlock| as a mock method and makes
+  // |CallMethodAndBlock| call the mocked method.  Use |MockCallMethodAndBlock|
+  // for setting/testing expectations.
   MOCK_METHOD3(MockCallMethodAndBlockWithErrorDetails,
                Response*(MethodCall* method_call,
                          int timeout_ms,
                          ScopedDBusError* error));
-  scoped_ptr<Response> CallMethodAndBlockWithErrorDetails(
+  std::unique_ptr<Response> CallMethodAndBlockWithErrorDetails(
       MethodCall* method_call,
       int timeout_ms,
       ScopedDBusError* error) override {
-    return scoped_ptr<Response>(
+    return std::unique_ptr<Response>(
         MockCallMethodAndBlockWithErrorDetails(method_call, timeout_ms, error));
   }
   MOCK_METHOD2(MockCallMethodAndBlock, Response*(MethodCall* method_call,
                                                  int timeout_ms));
-  scoped_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
-                                          int timeout_ms) override {
-    return scoped_ptr<Response>(MockCallMethodAndBlock(method_call,
-                                                       timeout_ms));
+  std::unique_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
+                                               int timeout_ms) override {
+    return std::unique_ptr<Response>(
+        MockCallMethodAndBlock(method_call, timeout_ms));
   }
   MOCK_METHOD3(CallMethod, void(MethodCall* method_call,
                                 int timeout_ms,
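
For reference, a test drives this workaround roughly as below; the expectation is hypothetical, and ownership of the raw Response* passes to the unique_ptr wrapper when the mocked call runs:

#include <memory>

#include "dbus/message.h"
#include "dbus/mock_object_proxy.h"
#include "testing/gmock/include/gmock/gmock.h"

using ::testing::_;
using ::testing::Return;

// Sketch: expectations go on the raw-pointer mock method; code under test
// still receives std::unique_ptr<Response> from CallMethodAndBlock().
void ExpectSingleBlockingCall(dbus::MockObjectProxy* proxy) {
  dbus::Response* reply = dbus::Response::CreateEmpty().release();
  EXPECT_CALL(*proxy, MockCallMethodAndBlock(_, _)).WillOnce(Return(reply));
}
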
diff --git a/dbus/object_manager.cc b/dbus/object_manager.cc
index 34b881c..178bb5f 100644
--- a/dbus/object_manager.cc
+++ b/dbus/object_manager.cc
@@ -249,7 +249,7 @@
   return self->HandleMessage(connection, raw_message);
 }
 
-DBusHandlerResult ObjectManager::HandleMessage(DBusConnection* /* connection */,
+DBusHandlerResult ObjectManager::HandleMessage(DBusConnection*,
                                                DBusMessage* raw_message) {
   DCHECK(bus_);
   bus_->AssertOnDBusThread();
@@ -263,8 +263,7 @@
   // raw_message will be unrefed on exit of the function. Increment the
   // reference so we can use it in Signal.
   dbus_message_ref(raw_message);
-  scoped_ptr<Signal> signal(
-      Signal::FromRawMessage(raw_message));
+  std::unique_ptr<Signal> signal(Signal::FromRawMessage(raw_message));
 
   const std::string interface = signal->GetInterface();
   const std::string member = signal->GetMember();
@@ -387,8 +386,8 @@
 }
 
 void ObjectManager::InterfacesAddedConnected(
-    const std::string& /* interface_name */,
-    const std::string& /* signal_name */,
+    const std::string& /*interface_name*/,
+    const std::string& /*signal_name*/,
     bool success) {
   LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
                             << ": Failed to connect to InterfacesAdded signal.";
@@ -412,8 +411,8 @@
 }
 
 void ObjectManager::InterfacesRemovedConnected(
-    const std::string& /* interface_name */,
-    const std::string& /* signal_name */,
+    const std::string& /*interface_name*/,
+    const std::string& /*signal_name*/,
     bool success) {
   LOG_IF(WARNING, !success) << service_name_ << " " << object_path_.value()
                             << ": Failed to connect to "
diff --git a/dbus/object_manager.h b/dbus/object_manager.h
index 25834c5..a97495e 100644
--- a/dbus/object_manager.h
+++ b/dbus/object_manager.h
@@ -166,8 +166,8 @@
     // called on each interface implementation with differing values of
     // |interface_name| as appropriate. An implementation class will only
     // receive multiple calls if it has registered for multiple interfaces.
-    virtual void ObjectAdded(const ObjectPath& /* object_path */,
-                             const std::string& /* interface_name */) { }
+    virtual void ObjectAdded(const ObjectPath& /*object_path*/,
+                             const std::string& /*interface_name*/) {}
 
     // Called by ObjectManager to inform the implementation class that an
     // object with the path |object_path| has been removed. This D-Bus interface
@@ -178,8 +178,8 @@
     // This method will be called before the Properties structure and the
     // ObjectProxy object for the given interface are cleaned up, so it is safe
     // to retrieve them during removal to vary processing.
-    virtual void ObjectRemoved(const ObjectPath& /* object_path */,
-                               const std::string& /* interface_name */) { }
+    virtual void ObjectRemoved(const ObjectPath& /*object_path*/,
+                               const std::string& /*interface_name*/) {}
   };
 
   // Client code should use Bus::GetObjectManager() instead of this constructor.
diff --git a/dbus/object_proxy.cc b/dbus/object_proxy.cc
index e30c9fd..ce02551 100644
--- a/dbus/object_proxy.cc
+++ b/dbus/object_proxy.cc
@@ -69,14 +69,16 @@
 // Originally we tried to make |method_call| a const reference, but we
 // gave up as dbus_connection_send_with_reply_and_block() takes a
 // non-const pointer of DBusMessage as the second parameter.
-scoped_ptr<Response> ObjectProxy::CallMethodAndBlockWithErrorDetails(
-    MethodCall* method_call, int timeout_ms, ScopedDBusError* error) {
+std::unique_ptr<Response> ObjectProxy::CallMethodAndBlockWithErrorDetails(
+    MethodCall* method_call,
+    int timeout_ms,
+    ScopedDBusError* error) {
   bus_->AssertOnDBusThread();
 
   if (!bus_->Connect() ||
       !method_call->SetDestination(service_name_) ||
       !method_call->SetPath(object_path_))
-    return scoped_ptr<Response>();
+    return std::unique_ptr<Response>();
 
   DBusMessage* request_message = method_call->raw_message();
 
@@ -97,7 +99,7 @@
                          method_call->GetMember(),
                          error->is_set() ? error->name() : "unknown error type",
                          error->is_set() ? error->message() : "");
-    return scoped_ptr<Response>();
+    return std::unique_ptr<Response>();
   }
   // Record time spent for the method call. Don't include failures.
   UMA_HISTOGRAM_TIMES("DBus.SyncMethodCallTime",
@@ -106,8 +108,9 @@
   return Response::FromRawMessage(response_message);
 }
 
-scoped_ptr<Response> ObjectProxy::CallMethodAndBlock(MethodCall* method_call,
-                                                     int timeout_ms) {
+std::unique_ptr<Response> ObjectProxy::CallMethodAndBlock(
+    MethodCall* method_call,
+    int timeout_ms) {
   ScopedDBusError error;
   return CallMethodAndBlockWithErrorDetails(method_call, timeout_ms, &error);
 }
@@ -325,7 +328,7 @@
   } else if (dbus_message_get_type(response_message) ==
              DBUS_MESSAGE_TYPE_ERROR) {
     // This will take |response_message| and release (unref) it.
-    scoped_ptr<ErrorResponse> error_response(
+    std::unique_ptr<ErrorResponse> error_response(
         ErrorResponse::FromRawMessage(response_message));
     error_callback.Run(error_response.get());
     // Delete the message on the D-Bus thread. See below for why.
@@ -335,7 +338,8 @@
                    error_response.release()));
   } else {
     // This will take |response_message| and release (unref) it.
-    scoped_ptr<Response> response(Response::FromRawMessage(response_message));
+    std::unique_ptr<Response> response(
+        Response::FromRawMessage(response_message));
     // The response is successfully received.
     response_callback.Run(response.get());
     // The message should be deleted on the D-Bus thread for a complicated
@@ -455,9 +459,8 @@
   }
 }
 
-DBusHandlerResult ObjectProxy::HandleMessage(
-    DBusConnection* /* connection */,
-    DBusMessage* raw_message) {
+DBusHandlerResult ObjectProxy::HandleMessage(DBusConnection*,
+                                             DBusMessage* raw_message) {
   bus_->AssertOnDBusThread();
 
   if (dbus_message_get_type(raw_message) != DBUS_MESSAGE_TYPE_SIGNAL)
@@ -466,8 +469,7 @@
   // raw_message will be unrefed on exit of the function. Increment the
   // reference so we can use it in Signal.
   dbus_message_ref(raw_message);
-  scoped_ptr<Signal> signal(
-      Signal::FromRawMessage(raw_message));
+  std::unique_ptr<Signal> signal(Signal::FromRawMessage(raw_message));
 
   // Verify the signal comes from the object we're proxying for; this is
   // our last chance to return DBUS_HANDLER_RESULT_NOT_YET_HANDLED and
@@ -659,7 +661,7 @@
 }
 
 DBusHandlerResult ObjectProxy::HandleNameOwnerChanged(
-    scoped_ptr<Signal> signal) {
+    std::unique_ptr<Signal> signal) {
   DCHECK(signal);
   bus_->AssertOnDBusThread();
 
diff --git a/dbus/object_proxy.h b/dbus/object_proxy.h
index edb97a5..033e886 100644
--- a/dbus/object_proxy.h
+++ b/dbus/object_proxy.h
@@ -8,6 +8,7 @@
 #include <dbus/dbus.h>
 
 #include <map>
+#include <memory>
 #include <set>
 #include <string>
 #include <vector>
@@ -97,7 +98,7 @@
   // in the |error| object.
   //
   // BLOCKING CALL.
-  virtual scoped_ptr<Response> CallMethodAndBlockWithErrorDetails(
+  virtual std::unique_ptr<Response> CallMethodAndBlockWithErrorDetails(
       MethodCall* method_call,
       int timeout_ms,
       ScopedDBusError* error);
@@ -106,8 +107,8 @@
   // is returned. Returns NULL on error.
   //
   // BLOCKING CALL.
-  virtual scoped_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
-                                                  int timeout_ms);
+  virtual std::unique_ptr<Response> CallMethodAndBlock(MethodCall* method_call,
+                                                       int timeout_ms);
 
   // Requests to call the method of the remote object.
   //
@@ -289,7 +290,8 @@
   void UpdateNameOwnerAndBlock();
 
   // Handles NameOwnerChanged signal from D-Bus's special message bus.
-  DBusHandlerResult HandleNameOwnerChanged(scoped_ptr<dbus::Signal> signal);
+  DBusHandlerResult HandleNameOwnerChanged(
+      std::unique_ptr<dbus::Signal> signal);
 
   // Runs |name_owner_changed_callback_|.
   void RunNameOwnerChangedCallback(const std::string& old_owner,
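
A typical blocking call through the updated signature; the interface and method names are placeholders:

#include <memory>
#include <string>

#include "dbus/message.h"
#include "dbus/object_proxy.h"

// Sketch: synchronous call returning std::unique_ptr<Response>; a null
// pointer still signals failure, as with the old scoped_ptr return.
bool GetRemoteName(dbus::ObjectProxy* proxy, std::string* name) {
  dbus::MethodCall method_call("org.example.Interface", "GetName");
  std::unique_ptr<dbus::Response> response = proxy->CallMethodAndBlock(
      &method_call, dbus::ObjectProxy::TIMEOUT_USE_DEFAULT);
  if (!response)
    return false;
  dbus::MessageReader reader(response.get());
  return reader.PopString(name);
}
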
diff --git a/dbus/property.cc b/dbus/property.cc
index 156d0c7..aa58436 100644
--- a/dbus/property.cc
+++ b/dbus/property.cc
@@ -89,7 +89,7 @@
   }
 }
 
-void PropertySet::ChangedConnected(const std::string& /* interface_name */,
+void PropertySet::ChangedConnected(const std::string& /*interface_name*/,
                                    const std::string& signal_name,
                                    bool success) {
   LOG_IF(WARNING, !success) << "Failed to connect to " << signal_name
@@ -141,9 +141,8 @@
   writer.AppendString(property->name());
 
   DCHECK(object_proxy_);
-  scoped_ptr<dbus::Response> response(
-      object_proxy_->CallMethodAndBlock(&method_call,
-                                        ObjectProxy::TIMEOUT_USE_DEFAULT));
+  std::unique_ptr<dbus::Response> response(object_proxy_->CallMethodAndBlock(
+      &method_call, ObjectProxy::TIMEOUT_USE_DEFAULT));
 
   if (!response.get()) {
     LOG(WARNING) << property->name() << ": GetAndBlock: failed.";
@@ -212,9 +211,8 @@
   property->AppendSetValueToWriter(&writer);
 
   DCHECK(object_proxy_);
-  scoped_ptr<dbus::Response> response(
-      object_proxy_->CallMethodAndBlock(&method_call,
-                                        ObjectProxy::TIMEOUT_USE_DEFAULT));
+  std::unique_ptr<dbus::Response> response(object_proxy_->CallMethodAndBlock(
+      &method_call, ObjectProxy::TIMEOUT_USE_DEFAULT));
   if (response.get())
     return true;
   return false;
diff --git a/dbus/values_util.cc b/dbus/values_util.cc
index e932312..ed435a1 100644
--- a/dbus/values_util.cc
+++ b/dbus/values_util.cc
@@ -4,9 +4,10 @@
 
 #include "dbus/values_util.h"
 
+#include <memory>
+
 #include "base/json/json_writer.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/values.h"
 #include "dbus/message.h"
 
@@ -47,7 +48,7 @@
         return false;
     } else {
       // If the type of keys is not STRING, convert it to string.
-      scoped_ptr<base::Value> key(PopDataAsValue(&entry_reader));
+      std::unique_ptr<base::Value> key(PopDataAsValue(&entry_reader));
       if (!key)
         return false;
       // Use JSONWriter to convert an arbitrary value to a string.
@@ -176,12 +177,12 @@
         // If the type of the array's element is DICT_ENTRY, create a
         // DictionaryValue, otherwise create a ListValue.
         if (sub_reader.GetDataType() == Message::DICT_ENTRY) {
-          scoped_ptr<base::DictionaryValue> dictionary_value(
+          std::unique_ptr<base::DictionaryValue> dictionary_value(
               new base::DictionaryValue);
           if (PopDictionaryEntries(&sub_reader, dictionary_value.get()))
             result = dictionary_value.release();
         } else {
-          scoped_ptr<base::ListValue> list_value(new base::ListValue);
+          std::unique_ptr<base::ListValue> list_value(new base::ListValue);
           if (PopListElements(&sub_reader, list_value.get()))
             result = list_value.release();
         }
@@ -191,7 +192,7 @@
     case Message::STRUCT: {
       MessageReader sub_reader(NULL);
       if (reader->PopStruct(&sub_reader)) {
-        scoped_ptr<base::ListValue> list_value(new base::ListValue);
+        std::unique_ptr<base::ListValue> list_value(new base::ListValue);
         if (PopListElements(&sub_reader, list_value.get()))
           result = list_value.release();
       }
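
The container handling above is what backs dbus::PopDataAsValue(); a minimal caller, assuming only the headers this file already uses:

#include <memory>
#include <string>

#include "base/json/json_writer.h"
#include "base/values.h"
#include "dbus/message.h"
#include "dbus/values_util.h"

// Sketch: convert whatever sits at the reader's current position into a
// base::Value and render it as JSON. Purely illustrative.
std::string DumpNextValueAsJson(dbus::MessageReader* reader) {
  std::unique_ptr<base::Value> value(dbus::PopDataAsValue(reader));
  std::string json;
  if (value)
    base::JSONWriter::Write(*value, &json);
  return json;
}
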
diff --git a/sandbox/BUILD.gn b/sandbox/BUILD.gn
index 6825a1d..8ca3574 100644
--- a/sandbox/BUILD.gn
+++ b/sandbox/BUILD.gn
@@ -9,12 +9,10 @@
       "//sandbox/win:sandbox",
     ]
   } else if (is_mac) {
-    # TODO(GYP): Make sandbox compile w/ 10.6 SDK.
-    if (false) {
-      public_deps = [
-        "//sandbox/mac:sandbox",
-      ]
-    }
+    public_deps = [
+      "//sandbox/mac:sandbox",
+      "//sandbox/mac:seatbelt",
+    ]
   } else if (is_linux || is_android) {
     public_deps = [
       "//sandbox/linux:sandbox",
diff --git a/sandbox/linux/BUILD.gn b/sandbox/linux/BUILD.gn
index 341d363..e95303e 100644
--- a/sandbox/linux/BUILD.gn
+++ b/sandbox/linux/BUILD.gn
@@ -192,24 +192,15 @@
       rebase_path(outputs, root_build_dir) + rebase_path(inputs, root_build_dir)
 }
 
-# TODO(GYP): Delete this after we've converted everything to GN.
-# The _run targets exist only for compatibility w/ GYP.
-group("sandbox_linux_unittests_run") {
-  testonly = true
-  deps = [
-    ":sandbox_linux_unittests",
-  ]
-}
 
-# The main sandboxing test target. "sandbox_linux_unittests" cannot use the
-# test() template because the test is run as an executable not as an APK on
-# Android.
-executable("sandbox_linux_unittests") {
-  testonly = true
+test("sandbox_linux_unittests") {
   deps = [
     ":sandbox_linux_unittests_sources",
     "//build/config/sanitizers:deps",
   ]
+  if (is_android) {
+    use_raw_android_executable = true
+  }
 }
 
 component("seccomp_bpf") {
@@ -242,9 +233,11 @@
   ]
   defines = [ "SANDBOX_IMPLEMENTATION" ]
 
+  public_deps = [
+    ":sandbox_services_headers",
+  ]
   deps = [
     ":sandbox_services",
-    ":sandbox_services_headers",
     "//base",
   ]
 
@@ -351,6 +344,7 @@
 
   defines = [ "SANDBOX_IMPLEMENTATION" ]
 
+  public_deps = []
   deps = [
     "//base",
   ]
@@ -365,7 +359,7 @@
       "services/namespace_utils.h",
     ]
 
-    deps += [ ":sandbox_services_headers" ]
+    public_deps += [ ":sandbox_services_headers" ]
   }
 
   if (is_nacl_nonsfi) {
@@ -442,23 +436,11 @@
 }
 
 if (is_android) {
-  create_native_executable_dist("sandbox_linux_unittests_deps") {
+  # TODO(GYP_GONE) Delete this after we've converted everything to GN.
+  group("sandbox_linux_unittests_deps") {
     testonly = true
-    dist_dir = "$root_out_dir/sandbox_linux_unittests_deps"
-    binary = "$root_out_dir/sandbox_linux_unittests"
     deps = [
       ":sandbox_linux_unittests",
     ]
-
-    if (is_component_build) {
-      deps += [ "//build/android:cpplib_stripped" ]
-    }
-  }
-
-  test_runner_script("sandbox_linux_unittests__test_runner_script") {
-    test_name = "sandbox_linux_unittests"
-    test_type = "gtest"
-    test_suite = "sandbox_linux_unittests"
-    isolate_file = "//sandbox/sandbox_linux_unittests_android.isolate"
   }
 }
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl.cc b/sandbox/linux/bpf_dsl/bpf_dsl.cc
index 3330c47..fed6368 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl.cc
+++ b/sandbox/linux/bpf_dsl/bpf_dsl.cc
@@ -11,7 +11,6 @@
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h"
 #include "sandbox/linux/bpf_dsl/errorcode.h"
 #include "sandbox/linux/bpf_dsl/policy_compiler.h"
@@ -24,6 +23,7 @@
 class ReturnResultExprImpl : public internal::ResultExprImpl {
  public:
   explicit ReturnResultExprImpl(uint32_t ret) : ret_(ret) {}
+  ~ReturnResultExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc) const override {
     return pc->Return(ret_);
@@ -36,8 +36,6 @@
   }
 
  private:
-  ~ReturnResultExprImpl() override {}
-
   bool IsAction(uint32_t action) const {
     return (ret_ & SECCOMP_RET_ACTION) == action;
   }
@@ -53,6 +51,7 @@
       : func_(func), arg_(arg), safe_(safe) {
     DCHECK(func_);
   }
+  ~TrapResultExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc) const override {
     return pc->Trap(func_, arg_, safe_);
@@ -63,8 +62,6 @@
   bool IsDeny() const override { return true; }
 
  private:
-  ~TrapResultExprImpl() override {}
-
   TrapRegistry::TrapFnc func_;
   const void* arg_;
   bool safe_;
@@ -74,10 +71,13 @@
 
 class IfThenResultExprImpl : public internal::ResultExprImpl {
  public:
-  IfThenResultExprImpl(const BoolExpr& cond,
-                       const ResultExpr& then_result,
-                       const ResultExpr& else_result)
-      : cond_(cond), then_result_(then_result), else_result_(else_result) {}
+  IfThenResultExprImpl(BoolExpr cond,
+                       ResultExpr then_result,
+                       ResultExpr else_result)
+      : cond_(std::move(cond)),
+        then_result_(std::move(then_result)),
+        else_result_(std::move(else_result)) {}
+  ~IfThenResultExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc) const override {
     // We compile the "then" and "else" expressions in separate statements so
@@ -92,8 +92,6 @@
   }
 
  private:
-  ~IfThenResultExprImpl() override {}
-
   BoolExpr cond_;
   ResultExpr then_result_;
   ResultExpr else_result_;
@@ -104,6 +102,7 @@
 class ConstBoolExprImpl : public internal::BoolExprImpl {
  public:
   ConstBoolExprImpl(bool value) : value_(value) {}
+  ~ConstBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -112,8 +111,6 @@
   }
 
  private:
-  ~ConstBoolExprImpl() override {}
-
   bool value_;
 
   DISALLOW_COPY_AND_ASSIGN(ConstBoolExprImpl);
@@ -126,6 +123,7 @@
                           uint64_t mask,
                           uint64_t value)
       : argno_(argno), width_(width), mask_(mask), value_(value) {}
+  ~MaskedEqualBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -134,8 +132,6 @@
   }
 
  private:
-  ~MaskedEqualBoolExprImpl() override {}
-
   int argno_;
   size_t width_;
   uint64_t mask_;
@@ -146,7 +142,8 @@
 
 class NegateBoolExprImpl : public internal::BoolExprImpl {
  public:
-  explicit NegateBoolExprImpl(const BoolExpr& cond) : cond_(cond) {}
+  explicit NegateBoolExprImpl(BoolExpr cond) : cond_(std::move(cond)) {}
+  ~NegateBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -155,8 +152,6 @@
   }
 
  private:
-  ~NegateBoolExprImpl() override {}
-
   BoolExpr cond_;
 
   DISALLOW_COPY_AND_ASSIGN(NegateBoolExprImpl);
@@ -164,8 +159,9 @@
 
 class AndBoolExprImpl : public internal::BoolExprImpl {
  public:
-  AndBoolExprImpl(const BoolExpr& lhs, const BoolExpr& rhs)
-      : lhs_(lhs), rhs_(rhs) {}
+  AndBoolExprImpl(BoolExpr lhs, BoolExpr rhs)
+      : lhs_(std::move(lhs)), rhs_(std::move(rhs)) {}
+  ~AndBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -175,8 +171,6 @@
   }
 
  private:
-  ~AndBoolExprImpl() override {}
-
   BoolExpr lhs_;
   BoolExpr rhs_;
 
@@ -185,8 +179,9 @@
 
 class OrBoolExprImpl : public internal::BoolExprImpl {
  public:
-  OrBoolExprImpl(const BoolExpr& lhs, const BoolExpr& rhs)
-      : lhs_(lhs), rhs_(rhs) {}
+  OrBoolExprImpl(BoolExpr lhs, BoolExpr rhs)
+      : lhs_(std::move(lhs)), rhs_(std::move(rhs)) {}
+  ~OrBoolExprImpl() override {}
 
   CodeGen::Node Compile(PolicyCompiler* pc,
                         CodeGen::Node then_node,
@@ -196,8 +191,6 @@
   }
 
  private:
-  ~OrBoolExprImpl() override {}
-
   BoolExpr lhs_;
   BoolExpr rhs_;
 
@@ -237,64 +230,63 @@
   // accordingly.
   CHECK(size == 4 || size == 8);
 
-  return BoolExpr(new const MaskedEqualBoolExprImpl(num, size, mask, val));
+  return std::make_shared<MaskedEqualBoolExprImpl>(num, size, mask, val);
 }
 
 }  // namespace internal
 
 ResultExpr Allow() {
-  return ResultExpr(new const ReturnResultExprImpl(SECCOMP_RET_ALLOW));
+  return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_ALLOW);
 }
 
 ResultExpr Error(int err) {
   CHECK(err >= ErrorCode::ERR_MIN_ERRNO && err <= ErrorCode::ERR_MAX_ERRNO);
-  return ResultExpr(new const ReturnResultExprImpl(SECCOMP_RET_ERRNO + err));
+  return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_ERRNO + err);
 }
 
 ResultExpr Kill() {
-  return ResultExpr(new const ReturnResultExprImpl(SECCOMP_RET_KILL));
+  return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_KILL);
 }
 
 ResultExpr Trace(uint16_t aux) {
-  return ResultExpr(new const ReturnResultExprImpl(SECCOMP_RET_TRACE + aux));
+  return std::make_shared<ReturnResultExprImpl>(SECCOMP_RET_TRACE + aux);
 }
 
 ResultExpr Trap(TrapRegistry::TrapFnc trap_func, const void* aux) {
-  return ResultExpr(
-      new const TrapResultExprImpl(trap_func, aux, true /* safe */));
+  return std::make_shared<TrapResultExprImpl>(trap_func, aux, true /* safe */);
 }
 
 ResultExpr UnsafeTrap(TrapRegistry::TrapFnc trap_func, const void* aux) {
-  return ResultExpr(
-      new const TrapResultExprImpl(trap_func, aux, false /* unsafe */));
+  return std::make_shared<TrapResultExprImpl>(trap_func, aux,
+                                              false /* unsafe */);
 }
 
 BoolExpr BoolConst(bool value) {
-  return BoolExpr(new const ConstBoolExprImpl(value));
+  return std::make_shared<ConstBoolExprImpl>(value);
 }
 
-BoolExpr Not(const BoolExpr& cond) {
-  return BoolExpr(new const NegateBoolExprImpl(cond));
+BoolExpr Not(BoolExpr cond) {
+  return std::make_shared<NegateBoolExprImpl>(std::move(cond));
 }
 
 BoolExpr AllOf() {
   return BoolConst(true);
 }
 
-BoolExpr AllOf(const BoolExpr& lhs, const BoolExpr& rhs) {
-  return BoolExpr(new const AndBoolExprImpl(lhs, rhs));
+BoolExpr AllOf(BoolExpr lhs, BoolExpr rhs) {
+  return std::make_shared<AndBoolExprImpl>(std::move(lhs), std::move(rhs));
 }
 
 BoolExpr AnyOf() {
   return BoolConst(false);
 }
 
-BoolExpr AnyOf(const BoolExpr& lhs, const BoolExpr& rhs) {
-  return BoolExpr(new const OrBoolExprImpl(lhs, rhs));
+BoolExpr AnyOf(BoolExpr lhs, BoolExpr rhs) {
+  return std::make_shared<OrBoolExprImpl>(std::move(lhs), std::move(rhs));
 }
 
-Elser If(const BoolExpr& cond, const ResultExpr& then_result) {
-  return Elser(nullptr).ElseIf(cond, then_result);
+Elser If(BoolExpr cond, ResultExpr then_result) {
+  return Elser(nullptr).ElseIf(std::move(cond), std::move(then_result));
 }
 
 Elser::Elser(cons::List<Clause> clause_list) : clause_list_(clause_list) {
@@ -306,11 +298,12 @@
 Elser::~Elser() {
 }
 
-Elser Elser::ElseIf(const BoolExpr& cond, const ResultExpr& then_result) const {
-  return Elser(Cons(std::make_pair(cond, then_result), clause_list_));
+Elser Elser::ElseIf(BoolExpr cond, ResultExpr then_result) const {
+  return Elser(Cons(std::make_pair(std::move(cond), std::move(then_result)),
+                    clause_list_));
 }
 
-ResultExpr Elser::Else(const ResultExpr& else_result) const {
+ResultExpr Elser::Else(ResultExpr else_result) const {
   // We finally have the default result expression for this
   // if/then/else sequence.  Also, we've already accumulated all
   // if/then pairs into a list in reverse order (i.e., lower priority
@@ -333,10 +326,10 @@
   //
   // and end up with an appropriately chained tree.
 
-  ResultExpr expr = else_result;
+  ResultExpr expr = std::move(else_result);
   for (const Clause& clause : clause_list_) {
-    expr = ResultExpr(
-        new const IfThenResultExprImpl(clause.first, clause.second, expr));
+    expr = std::make_shared<IfThenResultExprImpl>(clause.first, clause.second,
+                                                  std::move(expr));
   }
   return expr;
 }
@@ -344,5 +337,7 @@
 }  // namespace bpf_dsl
 }  // namespace sandbox
 
-template class scoped_refptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
-template class scoped_refptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+namespace std {
+template class shared_ptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
+template class shared_ptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+}  // namespace std
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl.h b/sandbox/linux/bpf_dsl/bpf_dsl.h
index ffd20ff..7f81344 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl.h
@@ -8,11 +8,11 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include <memory>
 #include <utility>
 #include <vector>
 
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
 #include "sandbox/linux/bpf_dsl/cons.h"
 #include "sandbox/linux/bpf_dsl/trap_registry.h"
@@ -77,10 +77,10 @@
 namespace bpf_dsl {
 
 // ResultExpr is an opaque reference to an immutable result expression tree.
-typedef scoped_refptr<const internal::ResultExprImpl> ResultExpr;
+using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
 
 // BoolExpr is an opaque reference to an immutable boolean expression tree.
-typedef scoped_refptr<const internal::BoolExprImpl> BoolExpr;
+using BoolExpr = std::shared_ptr<const internal::BoolExprImpl>;
 
 // Allow specifies a result that the system call should be allowed to
 // execute normally.
@@ -121,21 +121,21 @@
 SANDBOX_EXPORT BoolExpr BoolConst(bool value);
 
 // Not returns a BoolExpr representing the logical negation of |cond|.
-SANDBOX_EXPORT BoolExpr Not(const BoolExpr& cond);
+SANDBOX_EXPORT BoolExpr Not(BoolExpr cond);
 
 // AllOf returns a BoolExpr representing the logical conjunction ("and")
 // of zero or more BoolExprs.
 SANDBOX_EXPORT BoolExpr AllOf();
-SANDBOX_EXPORT BoolExpr AllOf(const BoolExpr& lhs, const BoolExpr& rhs);
+SANDBOX_EXPORT BoolExpr AllOf(BoolExpr lhs, BoolExpr rhs);
 template <typename... Rest>
-SANDBOX_EXPORT BoolExpr AllOf(const BoolExpr& first, const Rest&... rest);
+SANDBOX_EXPORT BoolExpr AllOf(BoolExpr first, Rest&&... rest);
 
 // AnyOf returns a BoolExpr representing the logical disjunction ("or")
 // of zero or more BoolExprs.
 SANDBOX_EXPORT BoolExpr AnyOf();
-SANDBOX_EXPORT BoolExpr AnyOf(const BoolExpr& lhs, const BoolExpr& rhs);
+SANDBOX_EXPORT BoolExpr AnyOf(BoolExpr lhs, BoolExpr rhs);
 template <typename... Rest>
-SANDBOX_EXPORT BoolExpr AnyOf(const BoolExpr& first, const Rest&... rest);
+SANDBOX_EXPORT BoolExpr AnyOf(BoolExpr first, Rest&&... rest);
 
 template <typename T>
 class SANDBOX_EXPORT Arg {
@@ -173,7 +173,7 @@
 
 // If begins a conditional result expression predicated on the
 // specified boolean expression.
-SANDBOX_EXPORT Elser If(const BoolExpr& cond, const ResultExpr& then_result);
+SANDBOX_EXPORT Elser If(BoolExpr cond, ResultExpr then_result);
 
 class SANDBOX_EXPORT Elser {
  public:
@@ -182,20 +182,20 @@
 
   // ElseIf extends the conditional result expression with another
   // "if then" clause, predicated on the specified boolean expression.
-  Elser ElseIf(const BoolExpr& cond, const ResultExpr& then_result) const;
+  Elser ElseIf(BoolExpr cond, ResultExpr then_result) const;
 
   // Else terminates a conditional result expression using |else_result| as
   // the default fallback result expression.
-  ResultExpr Else(const ResultExpr& else_result) const;
+  ResultExpr Else(ResultExpr else_result) const;
 
  private:
-  typedef std::pair<BoolExpr, ResultExpr> Clause;
+  using Clause = std::pair<BoolExpr, ResultExpr>;
 
   explicit Elser(cons::List<Clause> clause_list);
 
   cons::List<Clause> clause_list_;
 
-  friend Elser If(const BoolExpr&, const ResultExpr&);
+  friend Elser If(BoolExpr, ResultExpr);
   template <typename T>
   friend Caser<T> Switch(const Arg<T>&);
   DISALLOW_ASSIGN(Elser);
@@ -213,16 +213,16 @@
   ~Caser() {}
 
   // Case adds a single-value "case" clause to the switch.
-  Caser<T> Case(T value, const ResultExpr& result) const;
+  Caser<T> Case(T value, ResultExpr result) const;
 
   // Cases adds a multiple-value "case" clause to the switch.
   // See also the SANDBOX_BPF_DSL_CASES macro below for a more idiomatic way
   // of using this function.
   template <typename... Values>
-  Caser<T> CasesImpl(const ResultExpr& result, const Values&... values) const;
+  Caser<T> CasesImpl(ResultExpr result, const Values&... values) const;
 
   // Terminate the switch with a "default" clause.
-  ResultExpr Default(const ResultExpr& result) const;
+  ResultExpr Default(ResultExpr result) const;
 
  private:
   Caser(const Arg<T>& arg, Elser elser) : arg_(arg), elser_(elser) {}
@@ -299,34 +299,34 @@
 }
 
 template <typename T>
-Caser<T> Caser<T>::Case(T value, const ResultExpr& result) const {
-  return SANDBOX_BPF_DSL_CASES((value), result);
+Caser<T> Caser<T>::Case(T value, ResultExpr result) const {
+  return SANDBOX_BPF_DSL_CASES((value), std::move(result));
 }
 
 template <typename T>
 template <typename... Values>
-Caser<T> Caser<T>::CasesImpl(const ResultExpr& result,
-                             const Values&... values) const {
+Caser<T> Caser<T>::CasesImpl(ResultExpr result, const Values&... values) const {
   // Theoretically we could evaluate arg_ just once and emit a more efficient
   // dispatch table, but for now we simply translate into an equivalent
   // If/ElseIf/Else chain.
 
-  return Caser<T>(arg_, elser_.ElseIf(AnyOf((arg_ == values)...), result));
+  return Caser<T>(arg_,
+                  elser_.ElseIf(AnyOf((arg_ == values)...), std::move(result)));
 }
 
 template <typename T>
-ResultExpr Caser<T>::Default(const ResultExpr& result) const {
-  return elser_.Else(result);
+ResultExpr Caser<T>::Default(ResultExpr result) const {
+  return elser_.Else(std::move(result));
 }
 
 template <typename... Rest>
-BoolExpr AllOf(const BoolExpr& first, const Rest&... rest) {
-  return AllOf(first, AllOf(rest...));
+BoolExpr AllOf(BoolExpr first, Rest&&... rest) {
+  return AllOf(std::move(first), AllOf(std::forward<Rest>(rest)...));
 }
 
 template <typename... Rest>
-BoolExpr AnyOf(const BoolExpr& first, const Rest&... rest) {
-  return AnyOf(first, AnyOf(rest...));
+BoolExpr AnyOf(BoolExpr first, Rest&&... rest) {
+  return AnyOf(std::move(first), AnyOf(std::forward<Rest>(rest)...));
 }
 
 }  // namespace bpf_dsl
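
With ResultExpr and BoolExpr now plain std::shared_ptr aliases, passing them around by value stays cheap and policies read the same as before. A toy policy sketch (not part of this change; the class and choices are invented for illustration):

#include <errno.h>

#include "sandbox/linux/bpf_dsl/bpf_dsl.h"
#include "sandbox/linux/bpf_dsl/policy.h"
#include "sandbox/linux/system_headers/linux_syscalls.h"

namespace {

using sandbox::bpf_dsl::Allow;
using sandbox::bpf_dsl::Arg;
using sandbox::bpf_dsl::Error;
using sandbox::bpf_dsl::If;
using sandbox::bpf_dsl::ResultExpr;

// Toy policy: allow getpid(), allow exit_group(0), refuse everything else
// with EPERM. Illustrative only.
class ToyPolicy : public sandbox::bpf_dsl::Policy {
 public:
  ToyPolicy() {}
  ~ToyPolicy() override {}

  ResultExpr EvaluateSyscall(int sysno) const override {
    switch (sysno) {
      case __NR_getpid:
        return Allow();
      case __NR_exit_group: {
        const Arg<int> status(0);
        return If(status == 0, Allow()).Else(Error(EPERM));
      }
      default:
        return Error(EPERM);
    }
  }
};

}  // namespace
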
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl_forward.h b/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
index 1830389..10477c9 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl_forward.h
@@ -5,7 +5,8 @@
 #ifndef SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
 #define SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
 
-#include "base/memory/ref_counted.h"
+#include <memory>
+
 #include "sandbox/sandbox_export.h"
 
 namespace sandbox {
@@ -20,8 +21,8 @@
 class BoolExprImpl;
 }
 
-typedef scoped_refptr<const internal::ResultExprImpl> ResultExpr;
-typedef scoped_refptr<const internal::BoolExprImpl> BoolExpr;
+using ResultExpr = std::shared_ptr<const internal::ResultExprImpl>;
+using BoolExpr = std::shared_ptr<const internal::BoolExprImpl>;
 
 template <typename T>
 class Arg;
@@ -34,9 +35,11 @@
 }  // namespace bpf_dsl
 }  // namespace sandbox
 
+namespace std {
 extern template class SANDBOX_EXPORT
-    scoped_refptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
+    shared_ptr<const sandbox::bpf_dsl::internal::BoolExprImpl>;
 extern template class SANDBOX_EXPORT
-    scoped_refptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+    shared_ptr<const sandbox::bpf_dsl::internal::ResultExprImpl>;
+}  // namespace std
 
 #endif  // SANDBOX_LINUX_BPF_DSL_BPF_DSL_FORWARD_H_
diff --git a/sandbox/linux/bpf_dsl/bpf_dsl_impl.h b/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
index 0064f8a..35ff64f 100644
--- a/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
+++ b/sandbox/linux/bpf_dsl/bpf_dsl_impl.h
@@ -5,8 +5,9 @@
 #ifndef SANDBOX_LINUX_BPF_DSL_BPF_DSL_IMPL_H_
 #define SANDBOX_LINUX_BPF_DSL_BPF_DSL_IMPL_H_
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "sandbox/linux/bpf_dsl/codegen.h"
 #include "sandbox/sandbox_export.h"
 
@@ -18,7 +19,7 @@
 namespace internal {
 
 // Internal interface implemented by BoolExpr implementations.
-class BoolExprImpl : public base::RefCounted<BoolExprImpl> {
+class BoolExprImpl {
  public:
   // Compile uses |pc| to emit a CodeGen::Node that conditionally continues
   // to either |then_node| or |false_node|, depending on whether the represented
@@ -32,12 +33,11 @@
   virtual ~BoolExprImpl() {}
 
  private:
-  friend class base::RefCounted<BoolExprImpl>;
   DISALLOW_COPY_AND_ASSIGN(BoolExprImpl);
 };
 
 // Internal interface implemented by ResultExpr implementations.
-class ResultExprImpl : public base::RefCounted<ResultExprImpl> {
+class ResultExprImpl {
  public:
   // Compile uses |pc| to emit a CodeGen::Node that executes the
   // represented result expression.
@@ -58,7 +58,6 @@
   virtual ~ResultExprImpl() {}
 
  private:
-  friend class base::RefCounted<ResultExprImpl>;
   DISALLOW_COPY_AND_ASSIGN(ResultExprImpl);
 };
 
diff --git a/sandbox/linux/bpf_dsl/cons.h b/sandbox/linux/bpf_dsl/cons.h
index be050f7..07ac3df 100644
--- a/sandbox/linux/bpf_dsl/cons.h
+++ b/sandbox/linux/bpf_dsl/cons.h
@@ -5,8 +5,9 @@
 #ifndef SANDBOX_LINUX_BPF_DSL_CONS_H_
 #define SANDBOX_LINUX_BPF_DSL_CONS_H_
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
 #include "sandbox/sandbox_export.h"
 
 namespace sandbox {
@@ -60,19 +61,19 @@
 
 // List represents a (possibly null) pointer to a cons cell.
 template <typename T>
-using List = scoped_refptr<const Cell<T>>;
+using List = std::shared_ptr<const Cell<T>>;
 
 // Cons extends a cons list by prepending a new value to the front.
 template <typename T>
-List<T> Cons(const T& head, const List<T>& tail) {
-  return List<T>(new const Cell<T>(head, tail));
+List<T> Cons(const T& head, List<T> tail) {
+  return std::make_shared<Cell<T>>(head, std::move(tail));
 }
 
 // Cell represents an individual "cons cell" within a cons list.
 template <typename T>
-class Cell : public base::RefCounted<Cell<T>> {
+class Cell {
  public:
-  Cell(const T& head, const List<T>& tail) : head_(head), tail_(tail) {}
+  Cell(const T& head, List<T> tail) : head_(head), tail_(std::move(tail)) {}
 
   // Head returns this cell's head element.
   const T& head() const { return head_; }
@@ -81,12 +82,9 @@
   const List<T>& tail() const { return tail_; }
 
  private:
-  virtual ~Cell() {}
-
   T head_;
   List<T> tail_;
 
-  friend class base::RefCounted<Cell<T>>;
   DISALLOW_COPY_AND_ASSIGN(Cell);
 };
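
The cons list keeps its value semantics after the move to std::shared_ptr: prepending shares the tail instead of copying it. A small usage sketch:

#include "sandbox/linux/bpf_dsl/cons.h"

// Sketch: build the list (3 2 1) by prepending, then walk it. Cells are
// immutable and tails are shared, so copying a List<T> is cheap.
int SumOfSmallList() {
  sandbox::cons::List<int> list;  // A null list is the empty list.
  for (int i = 1; i <= 3; ++i)
    list = sandbox::cons::Cons(i, list);

  int sum = 0;
  for (sandbox::cons::List<int> it = list; it; it = it->tail())
    sum += it->head();
  return sum;  // 6
}
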
 
diff --git a/sandbox/linux/sandbox_linux.gypi b/sandbox/linux/sandbox_linux.gypi
index f5b3e0f..e96ae9e 100644
--- a/sandbox/linux/sandbox_linux.gypi
+++ b/sandbox/linux/sandbox_linux.gypi
@@ -376,29 +376,15 @@
     [ 'OS=="android"', {
       'targets': [
       {
-        'target_name': 'sandbox_linux_unittests_stripped',
-        'type': 'none',
-        'dependencies': [ 'sandbox_linux_unittests' ],
-        'actions': [{
-          'action_name': 'strip sandbox_linux_unittests',
-          'inputs': [ '<(PRODUCT_DIR)/sandbox_linux_unittests' ],
-          'outputs': [ '<(PRODUCT_DIR)/sandbox_linux_unittests_stripped' ],
-          'action': [ '<(android_strip)', '<@(_inputs)', '-o', '<@(_outputs)' ],
-        }],
-      },
-      {
         'target_name': 'sandbox_linux_unittests_deps',
         'type': 'none',
         'dependencies': [
-          'sandbox_linux_unittests_stripped',
+          'sandbox_linux_unittests',
         ],
-        # For the component build, ensure dependent shared libraries are
-        # stripped and put alongside sandbox_linux_unittests to simplify pushing
-        # to the device.
         'variables': {
-           'output_dir': '<(PRODUCT_DIR)/sandbox_linux_unittests_deps/',
-           'native_binary': '<(PRODUCT_DIR)/sandbox_linux_unittests_stripped',
-           'include_main_binary': 0,
+           'output_dir': '<(PRODUCT_DIR)/sandbox_linux_unittests__dist/',
+           'native_binary': '<(PRODUCT_DIR)/sandbox_linux_unittests',
+           'include_main_binary': 1,
         },
         'includes': [
           '../../build/android/native_app_dependencies.gypi'
diff --git a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h
index dcf308c..fa40e72 100644
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.h
@@ -5,6 +5,8 @@
 #ifndef SANDBOX_LINUX_SECCOMP_BPF_HELPERS_BASELINE_POLICY_H_
 #define SANDBOX_LINUX_SECCOMP_BPF_HELPERS_BASELINE_POLICY_H_
 
+#include <sys/types.h>
+
 #include "base/macros.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl_forward.h"
 #include "sandbox/linux/bpf_dsl/policy.h"
diff --git a/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc b/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
index 32dc4d1..f0392b1 100644
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy_unittest.cc
@@ -248,6 +248,19 @@
 TEST_BASELINE_SIGSYS(__NR_vserver);
 #endif
 
+#if defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
+BPF_TEST_C(BaselinePolicy, FutexEINVAL, BaselinePolicy) {
+  int ops[] = {
+      FUTEX_CMP_REQUEUE_PI, FUTEX_CMP_REQUEUE_PI_PRIVATE,
+      FUTEX_UNLOCK_PI_PRIVATE,
+  };
+
+  for (int op : ops) {
+    BPF_ASSERT_EQ(-1, syscall(__NR_futex, NULL, op, 0, NULL, NULL, 0));
+    BPF_ASSERT_EQ(EINVAL, errno);
+  }
+}
+#else
 BPF_DEATH_TEST_C(BaselinePolicy,
                  FutexWithRequeuePriorityInheritence,
                  DEATH_SEGV_MESSAGE(GetFutexErrorMessageContentForTests()),
@@ -271,6 +284,7 @@
   syscall(__NR_futex, NULL, FUTEX_UNLOCK_PI_PRIVATE, 0, NULL, NULL, 0);
   _exit(1);
 }
+#endif  // defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
 
 BPF_TEST_C(BaselinePolicy, PrctlDumpable, BaselinePolicy) {
   const int is_dumpable = prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
index 4b98366..fbbd634 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
@@ -95,6 +95,18 @@
 #endif
 }
 
+// Ubuntu's version of glibc has a race condition in sem_post that can cause
+// it to call futex(2) with bogus op arguments. To work around this, we need
+// to allow those futex(2) calls to fail with EINVAL, instead of crashing the
+// process. See crbug.com/598471.
+inline bool IsBuggyGlibcSemPost() {
+#if defined(LIBC_GLIBC) && !defined(OS_CHROMEOS)
+  return true;
+#else
+  return false;
+#endif
+}
+
 }  // namespace.
 
 #define CASES SANDBOX_BPF_DSL_CASES
@@ -249,15 +261,10 @@
   const uint64_t kAllowedFutexFlags = FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME;
   const Arg<int> op(1);
   return Switch(op & ~kAllowedFutexFlags)
-      .CASES((FUTEX_WAIT,
-              FUTEX_WAKE,
-              FUTEX_REQUEUE,
-              FUTEX_CMP_REQUEUE,
-              FUTEX_WAKE_OP,
-              FUTEX_WAIT_BITSET,
-              FUTEX_WAKE_BITSET),
+      .CASES((FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, FUTEX_CMP_REQUEUE,
+              FUTEX_WAKE_OP, FUTEX_WAIT_BITSET, FUTEX_WAKE_BITSET),
              Allow())
-      .Default(CrashSIGSYSFutex());
+      .Default(IsBuggyGlibcSemPost() ? Error(EINVAL) : CrashSIGSYSFutex());
 }
 
 ResultExpr RestrictGetSetpriority(pid_t target_pid) {
@@ -305,8 +312,16 @@
   static_assert(4 == sizeof(clockid_t), "clockid_t is not 32bit");
   const Arg<clockid_t> clockid(0);
   return Switch(clockid)
-      .CASES((CLOCK_MONOTONIC, CLOCK_MONOTONIC_COARSE, CLOCK_PROCESS_CPUTIME_ID,
-              CLOCK_REALTIME, CLOCK_REALTIME_COARSE, CLOCK_THREAD_CPUTIME_ID),
+      .CASES((
+#if defined(OS_ANDROID)
+              CLOCK_BOOTTIME,
+#endif
+              CLOCK_MONOTONIC,
+              CLOCK_MONOTONIC_COARSE,
+              CLOCK_PROCESS_CPUTIME_ID,
+              CLOCK_REALTIME,
+              CLOCK_REALTIME_COARSE,
+              CLOCK_THREAD_CPUTIME_ID),
              Allow())
       .Default(CrashSIGSYS());
 }
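
A userspace mirror of the decision the generated filter now makes for futex(2), for illustration only (the real enforcement happens in the BPF program built above; this helper is hypothetical):

#include <linux/futex.h>

// Sketch: strip the two allowed flag bits, allow the whitelisted ops, and
// otherwise either fail with EINVAL (buggy desktop glibc sem_post) or crash
// with SIGSYS.
enum class FutexVerdict { kAllow, kEinval, kCrashSigsys };

FutexVerdict EvaluateFutexOp(int op, bool buggy_glibc_sem_post) {
  const int kAllowedFutexFlags = FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME;
  switch (op & ~kAllowedFutexFlags) {
    case FUTEX_WAIT:
    case FUTEX_WAKE:
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
    case FUTEX_WAIT_BITSET:
    case FUTEX_WAKE_BITSET:
      return FutexVerdict::kAllow;
    default:
      return buggy_glibc_sem_post ? FutexVerdict::kEinval
                                  : FutexVerdict::kCrashSigsys;
  }
}
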
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
index 280211a..9daeedc 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions_unittests.cc
@@ -79,6 +79,9 @@
   CheckClock(CLOCK_MONOTONIC);
   CheckClock(CLOCK_MONOTONIC_COARSE);
   CheckClock(CLOCK_PROCESS_CPUTIME_ID);
+#if defined(OS_ANDROID)
+  CheckClock(CLOCK_BOOTTIME);
+#endif
   CheckClock(CLOCK_REALTIME);
   CheckClock(CLOCK_REALTIME_COARSE);
   CheckClock(CLOCK_THREAD_CPUTIME_ID);
diff --git a/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h b/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h
index 00d415c..a4315ba 100644
--- a/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h
+++ b/sandbox/linux/seccomp-bpf/bpf_tester_compatibility_delegate.h
@@ -5,8 +5,9 @@
 #ifndef SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTER_COMPATIBILITY_DELEGATE_H_
 #define SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTER_COMPATIBILITY_DELEGATE_H_
 
+#include <memory>
+
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "sandbox/linux/seccomp-bpf/sandbox_bpf_test_runner.h"
 
 namespace sandbox {
@@ -28,12 +29,12 @@
 
   ~BPFTesterCompatibilityDelegate() override {}
 
-  scoped_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+  std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
     // The current method is guaranteed to only run in the child process
     // running the test. In this process, the current object is guaranteed
     // to live forever. So it's ok to pass aux_pointer_for_policy_ to
     // the policy, which could in turn pass it to the kernel via Trap().
-    return scoped_ptr<bpf_dsl::Policy>(new Policy(&aux_));
+    return std::unique_ptr<bpf_dsl::Policy>(new Policy(&aux_));
   }
 
   void RunTestFunction() override {
diff --git a/sandbox/linux/seccomp-bpf/bpf_tests.h b/sandbox/linux/seccomp-bpf/bpf_tests.h
index cc4debd..8b2b12a 100644
--- a/sandbox/linux/seccomp-bpf/bpf_tests.h
+++ b/sandbox/linux/seccomp-bpf/bpf_tests.h
@@ -5,6 +5,8 @@
 #ifndef SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
 #define SANDBOX_LINUX_SECCOMP_BPF_BPF_TESTS_H__
 
+#include <memory>
+
 #include "base/logging.h"
 #include "base/macros.h"
 #include "build/build_config.h"
@@ -104,8 +106,8 @@
       : test_function_(test_function) {}
   ~BPFTesterSimpleDelegate() override {}
 
-  scoped_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
-    return scoped_ptr<bpf_dsl::Policy>(new PolicyClass());
+  std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+    return std::unique_ptr<bpf_dsl::Policy>(new PolicyClass());
   }
   void RunTestFunction() override {
     DCHECK(test_function_);
diff --git a/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc b/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc
index e300baf..c16cd72 100644
--- a/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc
+++ b/sandbox/linux/seccomp-bpf/bpf_tests_unittest.cc
@@ -10,9 +10,10 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <memory>
+
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "build/build_config.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
 #include "sandbox/linux/bpf_dsl/policy.h"
@@ -76,7 +77,7 @@
   }
   {
     // Test polymorphism.
-    scoped_ptr<BPFTesterDelegate> simple_delegate(
+    std::unique_ptr<BPFTesterDelegate> simple_delegate(
         new BPFTesterCompatibilityDelegate<EmptyClassTakingPolicy, FourtyTwo>(
             DummyTestFunction));
   }
@@ -113,8 +114,8 @@
   BasicBPFTesterDelegate() {}
   ~BasicBPFTesterDelegate() override {}
 
-  scoped_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
-    return scoped_ptr<bpf_dsl::Policy>(new EnosysPtracePolicy());
+  std::unique_ptr<bpf_dsl::Policy> GetSandboxBPFPolicy() override {
+    return std::unique_ptr<bpf_dsl::Policy>(new EnosysPtracePolicy());
   }
   void RunTestFunction() override {
     errno = 0;
diff --git a/sandbox/linux/seccomp-bpf/sandbox_bpf.cc b/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
index 5cf6c2e..4d8d436 100644
--- a/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
+++ b/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
@@ -14,8 +14,8 @@
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "sandbox/linux/bpf_dsl/bpf_dsl.h"
 #include "sandbox/linux/bpf_dsl/codegen.h"
 #include "sandbox/linux/bpf_dsl/policy.h"
@@ -31,7 +31,6 @@
 #include "sandbox/linux/system_headers/linux_filter.h"
 #include "sandbox/linux/system_headers/linux_seccomp.h"
 #include "sandbox/linux/system_headers/linux_syscalls.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
diff --git a/sandbox/linux/seccomp-bpf/sandbox_bpf.h b/sandbox/linux/seccomp-bpf/sandbox_bpf.h
index e758e03..1637b26 100644
--- a/sandbox/linux/seccomp-bpf/sandbox_bpf.h
+++ b/sandbox/linux/seccomp-bpf/sandbox_bpf.h
@@ -7,9 +7,10 @@
 
 #include <stdint.h>
 
+#include <memory>
+
 #include "base/files/scoped_file.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "sandbox/linux/bpf_dsl/codegen.h"
 #include "sandbox/sandbox_export.h"
 
@@ -104,7 +105,7 @@
 
   base::ScopedFD proc_fd_;
   bool sandbox_has_started_;
-  scoped_ptr<bpf_dsl::Policy> policy_;
+  std::unique_ptr<bpf_dsl::Policy> policy_;
 
   DISALLOW_COPY_AND_ASSIGN(SandboxBPF);
 };
diff --git a/sandbox/linux/seccomp-bpf/syscall.cc b/sandbox/linux/seccomp-bpf/syscall.cc
index bc6461f..4d55936 100644
--- a/sandbox/linux/seccomp-bpf/syscall.cc
+++ b/sandbox/linux/seccomp-bpf/syscall.cc
@@ -190,12 +190,15 @@
     "9:.size SyscallAsm, 9b-SyscallAsm\n"
 #elif defined(__mips__)
     ".text\n"
+    ".option pic2\n"
     ".align 4\n"
+    ".global SyscallAsm\n"
     ".type SyscallAsm, @function\n"
     "SyscallAsm:.ent SyscallAsm\n"
     ".frame  $sp, 40, $ra\n"
     ".set   push\n"
     ".set   noreorder\n"
+    ".cpload $t9\n"
     "addiu  $sp, $sp, -40\n"
     "sw     $ra, 36($sp)\n"
     // Check if "v0" is negative. If so, do not attempt to make a
@@ -204,7 +207,11 @@
     // used as a marker that BPF code inspects.
     "bgez   $v0, 1f\n"
     " nop\n"
-    "la     $v0, 2f\n"
+    // This is equivalent to "la $v0, 2f".
+    // The LA macro has to be avoided since LLVM-AS has an issue with LA in PIC mode
+    // https://llvm.org/bugs/show_bug.cgi?id=27644
+    "lw     $v0, %got(2f)($gp)\n"
+    "addiu  $v0, $v0, %lo(2f)\n"
     "b      2f\n"
     " nop\n"
     // On MIPS first four arguments go to registers a0 - a3 and any
@@ -262,6 +269,10 @@
 extern "C" {
 intptr_t SyscallAsm(intptr_t nr, const intptr_t args[6]);
 }
+#elif defined(__mips__)
+extern "C" {
+intptr_t SyscallAsm(intptr_t nr, const intptr_t args[8]);
+}
 #endif
 
 }  // namespace
@@ -395,20 +406,21 @@
                                     const intptr_t* args,
                                     intptr_t* err_ret) {
   register intptr_t ret __asm__("v0") = nr;
+  register intptr_t syscallasm __asm__("t9") = (intptr_t) &SyscallAsm;
   // a3 register becomes non zero on error.
   register intptr_t err_stat __asm__("a3") = 0;
   {
     register const intptr_t* data __asm__("a0") = args;
     asm volatile(
-        "la $t9, SyscallAsm\n"
         "jalr $t9\n"
         " nop\n"
         : "=r"(ret), "=r"(err_stat)
         : "0"(ret),
-          "r"(data)
+          "r"(data),
+          "r"(syscallasm)
           // a2 is in the clobber list so inline assembly cannot change its
           // value.
-        : "memory", "ra", "t9", "a2");
+        : "memory", "ra", "a2");
   }
 
   // Set an error status so it can be used outside of this function
diff --git a/sandbox/linux/services/credentials.cc b/sandbox/linux/services/credentials.cc
index 9e57c56..0c617d4 100644
--- a/sandbox/linux/services/credentials.cc
+++ b/sandbox/linux/services/credentials.cc
@@ -16,13 +16,14 @@
 #include <unistd.h>
 
 #include "base/bind.h"
+#include "base/compiler_specific.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/launch.h"
-#include "base/template_util.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "build/build_config.h"
 #include "sandbox/linux/services/namespace_utils.h"
 #include "sandbox/linux/services/proc_util.h"
@@ -30,7 +31,6 @@
 #include "sandbox/linux/services/thread_helpers.h"
 #include "sandbox/linux/system_headers/capability.h"
 #include "sandbox/linux/system_headers/linux_signal.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
@@ -94,9 +94,9 @@
   // /proc/tid directory for the thread (since /proc may not be aware of the
   // PID namespace). With a process, we can just use /proc/self.
   pid_t pid = -1;
-  char stack_buf[PTHREAD_STACK_MIN];
+  char stack_buf[PTHREAD_STACK_MIN] ALIGNAS(16);
 #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
-    defined(ARCH_CPU_MIPS64_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
+    defined(ARCH_CPU_MIPS_FAMILY)
   // The stack grows downward.
   void* stack = stack_buf + sizeof(stack_buf);
 #else
diff --git a/sandbox/linux/services/credentials.h b/sandbox/linux/services/credentials.h
index 095d636..b89a6aa 100644
--- a/sandbox/linux/services/credentials.h
+++ b/sandbox/linux/services/credentials.h
@@ -16,7 +16,6 @@
 
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "sandbox/linux/system_headers/capability.h"
 #include "sandbox/sandbox_export.h"
 
diff --git a/sandbox/linux/services/credentials_unittest.cc b/sandbox/linux/services/credentials_unittest.cc
index d666a0c..b95ba0b 100644
--- a/sandbox/linux/services/credentials_unittest.cc
+++ b/sandbox/linux/services/credentials_unittest.cc
@@ -15,13 +15,13 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <memory>
 #include <vector>
 
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "sandbox/linux/services/proc_util.h"
 #include "sandbox/linux/services/syscall_wrappers.h"
 #include "sandbox/linux/system_headers/capability.h"
@@ -40,7 +40,7 @@
 };
 
 // Wrapper to manage libcap2's cap_t type.
-typedef scoped_ptr<typeof(*((cap_t)0)), CapFreeDeleter> ScopedCap;
+typedef std::unique_ptr<typeof(*((cap_t)0)), CapFreeDeleter> ScopedCap;
 
 bool WorkingDirectoryIsRoot() {
   char current_dir[PATH_MAX];
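
The ScopedCap typedef above is the general std::unique_ptr custom-deleter idiom for C handles; the same shape works for any resource. A generic sketch with hypothetical names:

#include <stdio.h>

#include <memory>

// Sketch: RAII wrapper for FILE* in the style of ScopedCap/ScopedDIR.
struct FileCloser {
  void operator()(FILE* file) const {
    if (file)
      fclose(file);
  }
};
using ScopedFILE = std::unique_ptr<FILE, FileCloser>;

bool ReadFirstLine(const char* path, char* buf, int size) {
  ScopedFILE file(fopen(path, "r"));
  if (!file)
    return false;
  // fclose() runs automatically when |file| goes out of scope.
  return fgets(buf, size, file.get()) != nullptr;
}
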
diff --git a/sandbox/linux/services/namespace_sandbox_unittest.cc b/sandbox/linux/services/namespace_sandbox_unittest.cc
index 43e0ae5..c1acca6 100644
--- a/sandbox/linux/services/namespace_sandbox_unittest.cc
+++ b/sandbox/linux/services/namespace_sandbox_unittest.cc
@@ -16,7 +16,6 @@
 #include "base/files/file_enumerator.h"
 #include "base/files/file_path.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/process/launch.h"
 #include "base/process/process.h"
 #include "base/test/multiprocess_test.h"
diff --git a/sandbox/linux/services/namespace_utils.cc b/sandbox/linux/services/namespace_utils.cc
index 2c2b493..97add26 100644
--- a/sandbox/linux/services/namespace_utils.cc
+++ b/sandbox/linux/services/namespace_utils.cc
@@ -20,7 +20,7 @@
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/launch.h"
 #include "base/strings/safe_sprintf.h"
-#include "third_party/valgrind/valgrind.h"
+#include "base/third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
diff --git a/sandbox/linux/services/proc_util.cc b/sandbox/linux/services/proc_util.cc
index 247c29c..b6d58de 100644
--- a/sandbox/linux/services/proc_util.cc
+++ b/sandbox/linux/services/proc_util.cc
@@ -11,8 +11,9 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 
+#include <memory>
+
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/strings/string_number_conversions.h"
 
@@ -26,7 +27,7 @@
   }
 };
 
-typedef scoped_ptr<DIR, DIRCloser> ScopedDIR;
+typedef std::unique_ptr<DIR, DIRCloser> ScopedDIR;
 
 base::ScopedFD OpenDirectory(const char* path) {
   DCHECK(path);
diff --git a/sandbox/linux/services/syscall_wrappers.cc b/sandbox/linux/services/syscall_wrappers.cc
index 25cd28d..7132d2a 100644
--- a/sandbox/linux/services/syscall_wrappers.cc
+++ b/sandbox/linux/services/syscall_wrappers.cc
@@ -16,11 +16,11 @@
 
 #include "base/compiler_specific.h"
 #include "base/logging.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "build/build_config.h"
 #include "sandbox/linux/system_headers/capability.h"
 #include "sandbox/linux/system_headers/linux_signal.h"
 #include "sandbox/linux/system_headers/linux_syscalls.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
@@ -55,7 +55,7 @@
 #if defined(ARCH_CPU_X86_64)
   return syscall(__NR_clone, flags, child_stack, ptid, ctid, tls);
 #elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
-    defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_MIPS64_FAMILY)
+    defined(ARCH_CPU_MIPS_FAMILY)
   // CONFIG_CLONE_BACKWARDS defined.
   return syscall(__NR_clone, flags, child_stack, ptid, tls, ctid);
 #endif
diff --git a/sandbox/linux/services/syscall_wrappers_unittest.cc b/sandbox/linux/services/syscall_wrappers_unittest.cc
index 5ba5967..34ac740 100644
--- a/sandbox/linux/services/syscall_wrappers_unittest.cc
+++ b/sandbox/linux/services/syscall_wrappers_unittest.cc
@@ -13,12 +13,12 @@
 
 #include "base/logging.h"
 #include "base/posix/eintr_wrapper.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "build/build_config.h"
 #include "sandbox/linux/system_headers/linux_signal.h"
 #include "sandbox/linux/tests/test_utils.h"
 #include "sandbox/linux/tests/unit_tests.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
diff --git a/sandbox/linux/services/thread_helpers_unittests.cc b/sandbox/linux/services/thread_helpers_unittests.cc
index 6dcae0f..fe1080b 100644
--- a/sandbox/linux/services/thread_helpers_unittests.cc
+++ b/sandbox/linux/services/thread_helpers_unittests.cc
@@ -12,7 +12,6 @@
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/process_metrics.h"
 #include "base/threading/platform_thread.h"
diff --git a/sandbox/linux/syscall_broker/broker_host.cc b/sandbox/linux/syscall_broker/broker_host.cc
index 5d9d763..dd61dac 100644
--- a/sandbox/linux/syscall_broker/broker_host.cc
+++ b/sandbox/linux/syscall_broker/broker_host.cc
@@ -22,10 +22,10 @@
 #include "base/pickle.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/posix/unix_domain_socket_linux.h"
+#include "base/third_party/valgrind/valgrind.h"
 #include "sandbox/linux/syscall_broker/broker_common.h"
 #include "sandbox/linux/syscall_broker/broker_policy.h"
 #include "sandbox/linux/system_headers/linux_syscalls.h"
-#include "third_party/valgrind/valgrind.h"
 
 namespace sandbox {
 
diff --git a/sandbox/linux/syscall_broker/broker_process.cc b/sandbox/linux/syscall_broker/broker_process.cc
index 5ab8c6c..30713ce 100644
--- a/sandbox/linux/syscall_broker/broker_process.cc
+++ b/sandbox/linux/syscall_broker/broker_process.cc
@@ -19,7 +19,6 @@
 
 #include "base/callback.h"
 #include "base/logging.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/process/process_metrics.h"
 #include "build/build_config.h"
diff --git a/sandbox/linux/syscall_broker/broker_process.h b/sandbox/linux/syscall_broker/broker_process.h
index 8a512a0..3c0c809 100644
--- a/sandbox/linux/syscall_broker/broker_process.h
+++ b/sandbox/linux/syscall_broker/broker_process.h
@@ -5,12 +5,12 @@
 #ifndef SANDBOX_LINUX_SERVICES_BROKER_PROCESS_H_
 #define SANDBOX_LINUX_SERVICES_BROKER_PROCESS_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/callback_forward.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/pickle.h"
 #include "base/process/process.h"
 #include "sandbox/linux/syscall_broker/broker_policy.h"
@@ -82,7 +82,7 @@
   const bool quiet_failures_for_tests_;
   pid_t broker_pid_;                     // The PID of the broker (child).
   syscall_broker::BrokerPolicy policy_;  // The sandboxing policy.
-  scoped_ptr<syscall_broker::BrokerClient> broker_client_;
+  std::unique_ptr<syscall_broker::BrokerClient> broker_client_;
 
   DISALLOW_COPY_AND_ASSIGN(BrokerProcess);
 };
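
The broker_client_ member above follows the same ownership pattern as the other converted members: a std::unique_ptr field that is populated during Init(). A minimal standalone sketch of that pattern (invented names, standard library only):

#include <memory>

class Client {
 public:
  explicit Client(int fd) : fd_(fd) {}
  int fd() const { return fd_; }

 private:
  int fd_;
};

class Process {
 public:
  bool Init(int ipc_fd) {
    if (client_)
      return false;                     // Already initialized.
    client_.reset(new Client(ipc_fd));  // Ownership transfers to |client_|.
    return true;
  }

 private:
  std::unique_ptr<Client> client_;      // Was scoped_ptr before the uprev.
};
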
diff --git a/sandbox/linux/syscall_broker/broker_process_unittest.cc b/sandbox/linux/syscall_broker/broker_process_unittest.cc
index 15e1ffb..229764a 100644
--- a/sandbox/linux/syscall_broker/broker_process_unittest.cc
+++ b/sandbox/linux/syscall_broker/broker_process_unittest.cc
@@ -15,6 +15,7 @@
 #include <unistd.h>
 
 #include <algorithm>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -23,7 +24,6 @@
 #include "base/files/scoped_file.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
 #include "base/posix/eintr_wrapper.h"
 #include "base/posix/unix_domain_socket_linux.h"
 #include "sandbox/linux/syscall_broker/broker_client.h"
@@ -58,7 +58,8 @@
   std::vector<BrokerFilePermission> permissions;
   permissions.push_back(BrokerFilePermission::ReadOnly("/proc/cpuinfo"));
 
-  scoped_ptr<BrokerProcess> open_broker(new BrokerProcess(EPERM, permissions));
+  std::unique_ptr<BrokerProcess> open_broker(
+      new BrokerProcess(EPERM, permissions));
   ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
 
   ASSERT_TRUE(TestUtils::CurrentProcessHasChildren());
@@ -251,7 +252,7 @@
   std::vector<BrokerFilePermission> permissions;
 
   permissions.push_back(BrokerFilePermission::ReadOnlyRecursive("/proc/"));
-  scoped_ptr<BrokerProcess> open_broker(
+  std::unique_ptr<BrokerProcess> open_broker(
       new BrokerProcess(EPERM, permissions, fast_check_in_client));
   ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
   // Open cpuinfo via the broker.
@@ -310,7 +311,7 @@
   else
     permissions.push_back(BrokerFilePermission::ReadOnly(kFileCpuInfo));
 
-  scoped_ptr<BrokerProcess> open_broker(
+  std::unique_ptr<BrokerProcess> open_broker(
       new BrokerProcess(EPERM, permissions, fast_check_in_client));
   ASSERT_TRUE(open_broker->Init(base::Bind(&NoOpCallback)));
 
@@ -489,9 +490,16 @@
   // expected.
 }
 
+#if defined(OS_LINUX)
+// Flaky on Linux NG bots: https://crbug.com/595199.
+#define MAYBE_RecvMsgDescriptorLeak DISABLED_RecvMsgDescriptorLeak
+#else
+#define MAYBE_RecvMsgDescriptorLeak RecvMsgDescriptorLeak
+#endif
+
 // We need to allow noise because the broker will log when it receives our
 // bogus IPCs.
-SANDBOX_TEST_ALLOW_NOISE(BrokerProcess, RecvMsgDescriptorLeak) {
+SANDBOX_TEST_ALLOW_NOISE(BrokerProcess, MAYBE_RecvMsgDescriptorLeak) {
   // Android creates a socket on first use of the LOG call.
   // We need to ensure this socket is open before we
   // begin the test.
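
The MAYBE_ macro introduced above is the usual Chromium convention for disabling a flaky test on one platform while keeping it compiled everywhere. A hedged sketch of the same pattern with invented test names (OS_LINUX comes from build/build_config.h):

#include "build/build_config.h"  // Defines OS_LINUX and friends.
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_LINUX)
// The body still compiles; the DISABLED_ prefix tells the harness to skip it.
#define MAYBE_DescriptorCount DISABLED_DescriptorCount
#else
#define MAYBE_DescriptorCount DescriptorCount
#endif

TEST(ExampleSuite, MAYBE_DescriptorCount) {
  EXPECT_TRUE(true);  // Placeholder assertion; real checks would go here.
}
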
diff --git a/sandbox/mac/BUILD.gn b/sandbox/mac/BUILD.gn
index cdaf527..fd53131 100644
--- a/sandbox/mac/BUILD.gn
+++ b/sandbox/mac/BUILD.gn
@@ -5,45 +5,6 @@
 import("//build/config/mac/mac_sdk.gni")
 import("//testing/test.gni")
 
-generate_stubs_script = "//tools/generate_stubs/generate_stubs.py"
-generate_stubs_header = "xpc_stubs_header.fragment"
-generate_stubs_sig_public = "xpc_stubs.sig"
-generate_stubs_sig_private = "xpc_private_stubs.sig"
-generate_stubs_project = "sandbox/mac"
-generate_stubs_output_stem = "xpc_stubs"
-
-action("generate_stubs") {
-  script = generate_stubs_script
-  sources = [
-    generate_stubs_sig_private,
-    generate_stubs_sig_public,
-  ]
-  inputs = [
-    generate_stubs_header,
-  ]
-  outputs = [
-    "$target_gen_dir/$generate_stubs_output_stem.cc",
-    "$target_gen_dir/$generate_stubs_output_stem.h",
-  ]
-  args = [
-    "-i",
-    rebase_path(target_gen_dir, root_build_dir),
-    "-o",
-    rebase_path(target_gen_dir, root_build_dir),
-    "-t",
-    "posix_stubs",
-    "-e",
-    rebase_path(generate_stubs_header, root_build_dir),
-    "-s",
-    generate_stubs_output_stem,
-    "-p",
-    generate_stubs_project,
-    "-x",
-    "SANDBOX_EXPORT",
-  ]
-  args += rebase_path(sources, root_build_dir)
-}
-
 component("sandbox") {
   sources = [
     "bootstrap_sandbox.cc",
@@ -59,7 +20,6 @@
     "policy.h",
     "pre_exec_delegate.cc",
     "pre_exec_delegate.h",
-    "xpc.cc",
     "xpc.h",
     "xpc_message_server.cc",
     "xpc_message_server.h",
@@ -71,15 +31,16 @@
   deps = [
     "//base",
   ]
+}
 
-  # When the build SDK is 10.6, generate a dynamic stub loader. When the
-  # SDK is higher, then libxpc.dylib will be loaded automatically as part
-  # of libSystem, and only forward declarations of private symbols are
-  # necessary.
-  if (mac_sdk_version == "10.6") {
-    deps += [ ":generate_stubs" ]
-    sources += get_target_outputs(":generate_stubs")
-  }
+component("seatbelt") {
+  sources = [
+    "seatbelt.cc",
+    "seatbelt.h",
+    "seatbelt_export.h",
+  ]
+  libs = [ "sandbox" ]
+  defines = [ "SEATBELT_IMPLEMENTATION" ]
 }
 
 test("sandbox_mac_unittests") {
diff --git a/sandbox/mac/sandbox_mac.gypi b/sandbox/mac/sandbox_mac.gypi
index 91ad20b..79740e5 100644
--- a/sandbox/mac/sandbox_mac.gypi
+++ b/sandbox/mac/sandbox_mac.gypi
@@ -5,6 +5,26 @@
 {
   'targets': [
     {
+      'target_name': 'seatbelt',
+      'type' : '<(component)',
+      'sources': [
+        'seatbelt.cc',
+        'seatbelt.h',
+        'seatbelt_export.h',
+      ],
+      'defines': [
+        'SEATBELT_IMPLEMENTATION',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'link_settings': {
+        'libraries': [
+          '$(SDKROOT)/usr/lib/libsandbox.dylib',
+        ],
+      }
+    },
+    {
       'target_name': 'sandbox',
       'type': '<(component)',
       'sources': [
@@ -21,7 +41,6 @@
         'policy.h',
         'pre_exec_delegate.cc',
         'pre_exec_delegate.h',
-        'xpc.cc',
         'xpc.h',
         'xpc_message_server.cc',
         'xpc_message_server.h',
@@ -41,52 +60,6 @@
           '$(SDKROOT)/usr/lib/libbsm.dylib',
         ],
       },
-      'conditions': [
-        # When the build SDK is 10.6, generate a dynamic stub loader. When the
-        # SDK is higher, then libxpc.dylib will be loaded automatically as part
-        # of libSystem, and only forward declarations of private symbols are
-        # necessary.
-        ['mac_sdk == "10.6"', {
-          'actions': [
-            {
-              'variables': {
-                'generate_stubs_script': '../tools/generate_stubs/generate_stubs.py',
-                'generate_stubs_header_path': 'xpc_stubs_header.fragment',
-                'generate_stubs_sig_public_path': 'xpc_stubs.sig',
-                'generate_stubs_sig_private_path': 'xpc_private_stubs.sig',
-                'generate_stubs_project': 'sandbox/mac',
-                'generate_stubs_output_stem': 'xpc_stubs',
-              },
-              'action_name': 'generate_stubs',
-              'inputs': [
-                '<(generate_stubs_script)',
-                '<(generate_stubs_header_path)',
-                '<(generate_stubs_sig_public_path)',
-                '<(generate_stubs_sig_private_path)',
-              ],
-              'outputs': [
-                '<(INTERMEDIATE_DIR)/<(generate_stubs_output_stem).cc',
-                '<(SHARED_INTERMEDIATE_DIR)/<(generate_stubs_project)/<(generate_stubs_output_stem).h',
-              ],
-              'action': [
-                'python',
-                '<(generate_stubs_script)',
-                '-i', '<(INTERMEDIATE_DIR)',
-                '-o', '<(SHARED_INTERMEDIATE_DIR)/<(generate_stubs_project)',
-                '-t', 'posix_stubs',
-                '-e', '<(generate_stubs_header_path)',
-                '-s', '<(generate_stubs_output_stem)',
-                '-p', '<(generate_stubs_project)',
-                '-x', 'SANDBOX_EXPORT',
-                '<(generate_stubs_sig_public_path)',
-                '<(generate_stubs_sig_private_path)',
-              ],
-              'process_outputs_as_sources': 1,
-              'message': 'Generating XPC stubs for 10.6 compatability.',
-            },
-          ],
-        }],
-      ],
     },
     {
       'target_name': 'sandbox_mac_unittests',
diff --git a/sandbox/mac/xpc_private_stubs.sig b/sandbox/mac/xpc_private_stubs.sig
deleted file mode 100644
index b8e1c50..0000000
--- a/sandbox/mac/xpc_private_stubs.sig
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains declarations of private XPC functions. This file is
-// used for both forward declarations of private symbols and to use with
-// tools/generate_stubs for creating a dynamic library loader.
-
-// Dictionary manipulation.
-void xpc_dictionary_set_mach_send(xpc_object_t dictionary, const char* name, mach_port_t port);
-void xpc_dictionary_get_audit_token(xpc_object_t dictionary, audit_token_t* token);
-
-// Raw object getters.
-mach_port_t xpc_mach_send_get_right(xpc_object_t value);
-
-// Pipe methods.
-xpc_pipe_t xpc_pipe_create_from_port(mach_port_t port, int flags);
-int xpc_pipe_receive(mach_port_t port, xpc_object_t* message);
-int xpc_pipe_routine(xpc_pipe_t pipe, xpc_object_t request, xpc_object_t* reply);
-int xpc_pipe_routine_reply(xpc_object_t reply);
-int xpc_pipe_simpleroutine(xpc_pipe_t pipe, xpc_object_t message);
-int xpc_pipe_routine_forward(xpc_pipe_t forward_to, xpc_object_t request);
diff --git a/sandbox/mac/xpc_stubs.sig b/sandbox/mac/xpc_stubs.sig
deleted file mode 100644
index b8e7699..0000000
--- a/sandbox/mac/xpc_stubs.sig
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains declarations of public XPC functions used in the sandbox.
-// This file is used with tools/generate_stubs for creating a dynamic library
-// loader.
-
-// XPC object management.
-void xpc_release(xpc_object_t object);
-
-// Dictionary manipulation.
-xpc_object_t xpc_dictionary_create(const char* const *keys, const xpc_object_t* values, size_t count);
-const char* xpc_dictionary_get_string(xpc_object_t dictionary, const char* key);
-uint64_t xpc_dictionary_get_uint64(xpc_object_t dictionary, const char* key);
-void xpc_dictionary_set_uint64(xpc_object_t dictionary, const char* key, uint64_t value);
-int64_t xpc_dictionary_get_int64(xpc_object_t dictionary, const char* key);
-void xpc_dictionary_set_int64(xpc_object_t dictionary, const char* key, int64_t value);
-bool xpc_dictionary_get_bool(xpc_object_t dictionary, const char* key);
-xpc_object_t xpc_dictionary_create_reply(xpc_object_t request);
diff --git a/sandbox/mac/xpc_stubs_header.fragment b/sandbox/mac/xpc_stubs_header.fragment
deleted file mode 100644
index 2aa81cc..0000000
--- a/sandbox/mac/xpc_stubs_header.fragment
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SANDBOX_MAC_XPC_STUBS_HEADER_FRAGMENT_
-#define SANDBOX_MAC_XPC_STUBS_HEADER_FRAGMENT_
-
-#include <bsm/libbsm.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include "sandbox/sandbox_export.h"
-
-// Declare or include public types.
-#if !defined(MAC_OS_X_VERSION_10_7) || \
-    MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7
-
-extern "C" {
-typedef void* xpc_object_t;
-}  // extern "C"
-
-#else
-
-#include <xpc/xpc.h>
-
-#endif
-
-// Declare private types.
-extern "C" {
-typedef struct _xpc_pipe_s* xpc_pipe_t;
-}  // extern "C"
-
-#if defined(MAC_OS_X_VERSION_10_7) && \
-    MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-// Redeclare methods that only exist on 10.7+ to suppress
-// -Wpartial-availability warnings.
-extern "C" {
-XPC_EXPORT XPC_NONNULL1 XPC_NONNULL2 void
-xpc_dictionary_set_int64(xpc_object_t xdict, const char* key, int64_t value);
-
-XPC_EXPORT XPC_NONNULL1 void xpc_release(xpc_object_t object);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL_ALL
-bool xpc_dictionary_get_bool(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL_ALL int64_t
-xpc_dictionary_get_int64(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL_ALL
-const char* xpc_dictionary_get_string(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL_ALL uint64_t
-xpc_dictionary_get_uint64(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_NONNULL1 XPC_NONNULL2 void
-xpc_dictionary_set_uint64(xpc_object_t xdict, const char* key, uint64_t value);
-
-XPC_EXPORT XPC_NONNULL1 XPC_NONNULL2
-void xpc_dictionary_set_string(xpc_object_t xdict, const char* key,
-                               const char* string);
-
-XPC_EXPORT XPC_MALLOC XPC_RETURNS_RETAINED XPC_WARN_RESULT xpc_object_t
-xpc_dictionary_create(const char* const* keys,
-                      const xpc_object_t* values,
-                      size_t count);
-XPC_EXPORT XPC_MALLOC XPC_RETURNS_RETAINED XPC_WARN_RESULT XPC_NONNULL_ALL
-    xpc_object_t
-    xpc_dictionary_create_reply(xpc_object_t original);
-
-XPC_EXPORT XPC_WARN_RESULT XPC_NONNULL1 XPC_NONNULL2
-xpc_object_t xpc_dictionary_get_value(xpc_object_t xdict, const char* key);
-
-XPC_EXPORT XPC_MALLOC XPC_WARN_RESULT XPC_NONNULL1
-char* xpc_copy_description(xpc_object_t object);
-}  // extern "C"
-#endif
-
-#endif  // SANDBOX_MAC_XPC_STUBS_HEADER_FRAGMENT_
diff --git a/sandbox/win/BUILD.gn b/sandbox/win/BUILD.gn
index 327396b..89eaaed 100644
--- a/sandbox/win/BUILD.gn
+++ b/sandbox/win/BUILD.gn
@@ -12,8 +12,6 @@
   sources = [
     "src/acl.cc",
     "src/acl.h",
-    "src/app_container.cc",
-    "src/app_container.h",
     "src/broker_services.cc",
     "src/broker_services.h",
     "src/crosscall_client.h",
@@ -32,12 +30,6 @@
     "src/handle_closer.h",
     "src/handle_closer_agent.cc",
     "src/handle_closer_agent.h",
-    "src/handle_dispatcher.cc",
-    "src/handle_dispatcher.h",
-    "src/handle_interception.cc",
-    "src/handle_interception.h",
-    "src/handle_policy.cc",
-    "src/handle_policy.h",
     "src/interception.cc",
     "src/interception.h",
     "src/interception_agent.cc",
@@ -141,7 +133,6 @@
 
   if (current_cpu == "x64") {
     sources += [
-      "src/Wow64_64.cc",
       "src/interceptors_64.cc",
       "src/interceptors_64.h",
       "src/resolver_64.cc",
@@ -149,8 +140,6 @@
     ]
   } else if (current_cpu == "x86") {
     sources += [
-      "src/Wow64.cc",
-      "src/Wow64.h",
       "src/resolver_32.cc",
       "src/service_resolver_32.cc",
       "src/sidestep/ia32_modrm_map.cpp",
@@ -199,7 +188,6 @@
     "src/file_policy_test.cc",
     "src/handle_closer_test.cc",
     "src/handle_inheritance_test.cc",
-    "src/handle_policy_test.cc",
     "src/integrity_level_test.cc",
     "src/ipc_ping_test.cc",
     "src/lpc_policy_test.cc",
@@ -208,6 +196,7 @@
     "src/process_mitigations_test.cc",
     "src/process_policy_test.cc",
     "src/registry_policy_test.cc",
+    "src/restricted_token_test.cc",
     "src/sync_policy_test.cc",
     "src/sync_policy_test.h",
     "src/unload_dll_test.cc",
@@ -224,6 +213,8 @@
     "//base/test:test_support",
     "//testing/gtest",
   ]
+
+  libs = [ "dxva2.lib" ]
 }
 
 test("sbox_validation_tests") {
@@ -241,11 +232,12 @@
     "//base/test:test_support",
     "//testing/gtest",
   ]
+
+  libs = [ "shlwapi.lib" ]
 }
 
 test("sbox_unittests") {
   sources = [
-    "src/app_container_unittest.cc",
     "src/interception_unittest.cc",
     "src/ipc_unittest.cc",
     "src/job_unittest.cc",
diff --git a/sandbox/win/OWNERS b/sandbox/win/OWNERS
index 85047f7..54a76c1 100644
--- a/sandbox/win/OWNERS
+++ b/sandbox/win/OWNERS
@@ -1,3 +1,4 @@
 cpu@chromium.org
+forshaw@chromium.org
 jschuh@chromium.org
 wfh@chromium.org
diff --git a/sandbox/win/sandbox_win.gypi b/sandbox/win/sandbox_win.gypi
index f0d275a..8ac9e59 100644
--- a/sandbox/win/sandbox_win.gypi
+++ b/sandbox/win/sandbox_win.gypi
@@ -15,8 +15,6 @@
         'sources': [
             'src/acl.cc',
             'src/acl.h',
-            'src/app_container.cc',
-            'src/app_container.h',
             'src/broker_services.cc',
             'src/broker_services.h',
             'src/crosscall_client.h',
@@ -35,12 +33,6 @@
             'src/handle_closer.h',
             'src/handle_closer_agent.cc',
             'src/handle_closer_agent.h',
-            'src/handle_dispatcher.cc',
-            'src/handle_dispatcher.h',
-            'src/handle_interception.cc',
-            'src/handle_interception.h',
-            'src/handle_policy.cc',
-            'src/handle_policy.h',
             'src/interception.cc',
             'src/interception.h',
             'src/interception_agent.cc',
@@ -148,7 +140,6 @@
               'src/interceptors_64.h',
               'src/resolver_64.cc',
               'src/service_resolver_64.cc',
-              'src/Wow64_64.cc',
             ],
           }],
           ['target_arch=="ia32"', {
@@ -164,8 +155,6 @@
               'src/sidestep\mini_disassembler.h',
               'src/sidestep\preamble_patcher_with_stub.cpp',
               'src/sidestep\preamble_patcher.h',
-              'src/Wow64.cc',
-              'src/Wow64.h',
             ],
           }],
         ],
@@ -216,7 +205,6 @@
         'src/app_container_test.cc',
         'src/file_policy_test.cc',
         'src/handle_inheritance_test.cc',
-        'src/handle_policy_test.cc',
         'tests/integration_tests/integration_tests_test.cc',
         'src/handle_closer_test.cc',
         'src/integrity_level_test.cc',
@@ -227,6 +215,7 @@
         'src/process_mitigations_test.cc',
         'src/process_policy_test.cc',
         'src/registry_policy_test.cc',
+        'src/restricted_token_test.cc',
         'src/sync_policy_test.cc',
         'src/sync_policy_test.h',
         'src/unload_dll_test.cc',
@@ -236,6 +225,11 @@
         'tests/common/test_utils.h',
         'tests/integration_tests/integration_tests.cc',
       ],
+      'link_settings': {
+        'libraries': [
+          '-ldxva2.lib',
+        ],
+      },
     },
     {
       'target_name': 'sbox_validation_tests',
@@ -253,6 +247,11 @@
         'tests/validation_tests/commands.h',
         'tests/validation_tests/suite.cc',
       ],
+      'link_settings': {
+        'libraries': [
+          '-lshlwapi.lib',
+        ],
+      },
     },
     {
       'target_name': 'sbox_unittests',
@@ -263,7 +262,6 @@
         '../testing/gtest.gyp:gtest',
       ],
       'sources': [
-        'src/app_container_unittest.cc',
         'src/interception_unittest.cc',
         'src/service_resolver_unittest.cc',
         'src/restricted_token_unittest.cc',
diff --git a/sandbox/win/src/interceptors.h b/sandbox/win/src/interceptors.h
index 2391957..44b34e3 100644
--- a/sandbox/win/src/interceptors.h
+++ b/sandbox/win/src/interceptors.h
@@ -46,6 +46,21 @@
   GDIINITIALIZE_ID,
   GETSTOCKOBJECT_ID,
   REGISTERCLASSW_ID,
+  ENUMDISPLAYMONITORS_ID,
+  ENUMDISPLAYDEVICESA_ID,
+  GETMONITORINFOA_ID,
+  GETMONITORINFOW_ID,
+  CREATEOPMPROTECTEDOUTPUTS_ID,
+  GETCERTIFICATE_ID,
+  GETCERTIFICATESIZE_ID,
+  GETCERTIFICATEBYHANDLE_ID,
+  GETCERTIFICATESIZEBYHANDLE_ID,
+  DESTROYOPMPROTECTEDOUTPUT_ID,
+  CONFIGUREOPMPROTECTEDOUTPUT_ID,
+  GETOPMINFORMATION_ID,
+  GETOPMRANDOMNUMBER_ID,
+  GETSUGGESTEDOPMPROTECTEDOUTPUTARRAYSIZE_ID,
+  SETOPMSIGNINGKEYANDSEQUENCENUMBERS_ID,
   INTERCEPTOR_MAX_ID
 };
 
diff --git a/sandbox/win/src/ipc_tags.h b/sandbox/win/src/ipc_tags.h
index 3a1724b..1c754cd 100644
--- a/sandbox/win/src/ipc_tags.h
+++ b/sandbox/win/src/ipc_tags.h
@@ -28,11 +28,22 @@
   IPC_OPENEVENT_TAG,
   IPC_NTCREATEKEY_TAG,
   IPC_NTOPENKEY_TAG,
-  IPC_DUPLICATEHANDLEPROXY_TAG,
   IPC_GDI_GDIDLLINITIALIZE_TAG,
   IPC_GDI_GETSTOCKOBJECT_TAG,
   IPC_USER_REGISTERCLASSW_TAG,
   IPC_CREATETHREAD_TAG,
+  IPC_USER_ENUMDISPLAYMONITORS_TAG,
+  IPC_USER_ENUMDISPLAYDEVICES_TAG,
+  IPC_USER_GETMONITORINFO_TAG,
+  IPC_GDI_CREATEOPMPROTECTEDOUTPUTS_TAG,
+  IPC_GDI_GETCERTIFICATE_TAG,
+  IPC_GDI_GETCERTIFICATESIZE_TAG,
+  IPC_GDI_DESTROYOPMPROTECTEDOUTPUT_TAG,
+  IPC_GDI_CONFIGUREOPMPROTECTEDOUTPUT_TAG,
+  IPC_GDI_GETOPMINFORMATION_TAG,
+  IPC_GDI_GETOPMRANDOMNUMBER_TAG,
+  IPC_GDI_GETSUGGESTEDOPMPROTECTEDOUTPUTARRAYSIZE_TAG,
+  IPC_GDI_SETOPMSIGNINGKEYANDSEQUENCENUMBERS_TAG,
   IPC_LAST_TAG
 };
 
diff --git a/sandbox/win/src/nt_internals.h b/sandbox/win/src/nt_internals.h
index 2a39d5b..a206e94 100644
--- a/sandbox/win/src/nt_internals.h
+++ b/sandbox/win/src/nt_internals.h
@@ -30,6 +30,7 @@
 #define STATUS_PROCEDURE_NOT_FOUND    ((NTSTATUS)0xC000007AL)
 #define STATUS_INVALID_IMAGE_FORMAT   ((NTSTATUS)0xC000007BL)
 #define STATUS_NO_TOKEN               ((NTSTATUS)0xC000007CL)
+#define STATUS_NOT_SUPPORTED          ((NTSTATUS)0xC00000BBL)
 
 #define CURRENT_PROCESS ((HANDLE) -1)
 #define CURRENT_THREAD  ((HANDLE) -2)
@@ -644,6 +645,8 @@
   IN OUT PUNICODE_STRING DestinationString,
   IN PCWSTR SourceString);
 
+typedef ULONG (WINAPI* RtlNtStatusToDosErrorFunction)(NTSTATUS status);
+
 typedef enum _EVENT_TYPE {
   NotificationEvent,
   SynchronizationEvent
@@ -699,5 +702,164 @@
 
 const unsigned int NtProcessInformationAccessToken = 9;
 
+// -----------------------------------------------------------------------
+// GDI OPM API and Supported Calls
+
+#define DXGKMDT_OPM_OMAC_SIZE 16
+#define DXGKMDT_OPM_128_BIT_RANDOM_NUMBER_SIZE 16
+#define DXGKMDT_OPM_ENCRYPTED_PARAMETERS_SIZE 256
+#define DXGKMDT_OPM_CONFIGURE_SETTING_DATA_SIZE 4056
+#define DXGKMDT_OPM_GET_INFORMATION_PARAMETERS_SIZE 4056
+#define DXGKMDT_OPM_REQUESTED_INFORMATION_SIZE 4076
+#define DXGKMDT_OPM_HDCP_KEY_SELECTION_VECTOR_SIZE 5
+#define DXGKMDT_OPM_PROTECTION_TYPE_SIZE 4
+
+enum DXGKMDT_CERTIFICATE_TYPE {
+  DXGKMDT_OPM_CERTIFICATE = 0,
+  DXGKMDT_COPP_CERTIFICATE = 1,
+  DXGKMDT_UAB_CERTIFICATE = 2,
+  DXGKMDT_FORCE_ULONG = 0xFFFFFFFF
+};
+
+enum DXGKMDT_OPM_VIDEO_OUTPUT_SEMANTICS {
+  DXGKMDT_OPM_VOS_COPP_SEMANTICS = 0,
+  DXGKMDT_OPM_VOS_OPM_SEMANTICS = 1
+};
+
+enum DXGKMDT_DPCP_PROTECTION_LEVEL {
+  DXGKMDT_OPM_DPCP_OFF = 0,
+  DXGKMDT_OPM_DPCP_ON = 1,
+  DXGKMDT_OPM_DPCP_FORCE_ULONG = 0x7fffffff
+};
+
+enum DXGKMDT_OPM_HDCP_PROTECTION_LEVEL {
+  DXGKMDT_OPM_HDCP_OFF = 0,
+  DXGKMDT_OPM_HDCP_ON = 1,
+  DXGKMDT_OPM_HDCP_FORCE_ULONG = 0x7fffffff
+};
+
+enum DXGKMDT_OPM_HDCP_FLAG {
+  DXGKMDT_OPM_HDCP_FLAG_NONE = 0x00,
+  DXGKMDT_OPM_HDCP_FLAG_REPEATER = 0x01
+};
+
+enum DXGKMDT_OPM_PROTECTION_TYPE {
+  DXGKMDT_OPM_PROTECTION_TYPE_OTHER = 0x80000000,
+  DXGKMDT_OPM_PROTECTION_TYPE_NONE = 0x00000000,
+  DXGKMDT_OPM_PROTECTION_TYPE_COPP_COMPATIBLE_HDCP = 0x00000001,
+  DXGKMDT_OPM_PROTECTION_TYPE_ACP = 0x00000002,
+  DXGKMDT_OPM_PROTECTION_TYPE_CGMSA = 0x00000004,
+  DXGKMDT_OPM_PROTECTION_TYPE_HDCP = 0x00000008,
+  DXGKMDT_OPM_PROTECTION_TYPE_DPCP = 0x00000010,
+  DXGKMDT_OPM_PROTECTION_TYPE_MASK = 0x8000001F
+};
+
+typedef void* OPM_PROTECTED_OUTPUT_HANDLE;
+
+struct DXGKMDT_OPM_ENCRYPTED_PARAMETERS {
+  BYTE abEncryptedParameters[DXGKMDT_OPM_ENCRYPTED_PARAMETERS_SIZE];
+};
+
+struct DXGKMDT_OPM_OMAC {
+  BYTE abOMAC[DXGKMDT_OPM_OMAC_SIZE];
+};
+
+struct DXGKMDT_OPM_CONFIGURE_PARAMETERS {
+  DXGKMDT_OPM_OMAC omac;
+  GUID guidSetting;
+  ULONG ulSequenceNumber;
+  ULONG cbParametersSize;
+  BYTE abParameters[DXGKMDT_OPM_CONFIGURE_SETTING_DATA_SIZE];
+};
+
+struct DXGKMDT_OPM_RANDOM_NUMBER {
+  BYTE abRandomNumber[DXGKMDT_OPM_128_BIT_RANDOM_NUMBER_SIZE];
+};
+
+struct DXGKMDT_OPM_GET_INFO_PARAMETERS {
+  DXGKMDT_OPM_OMAC omac;
+  DXGKMDT_OPM_RANDOM_NUMBER rnRandomNumber;
+  GUID guidInformation;
+  ULONG ulSequenceNumber;
+  ULONG cbParametersSize;
+  BYTE abParameters[DXGKMDT_OPM_GET_INFORMATION_PARAMETERS_SIZE];
+};
+
+struct DXGKMDT_OPM_REQUESTED_INFORMATION {
+  DXGKMDT_OPM_OMAC omac;
+  ULONG cbRequestedInformationSize;
+  BYTE abRequestedInformation[DXGKMDT_OPM_REQUESTED_INFORMATION_SIZE];
+};
+
+struct DXGKMDT_OPM_SET_PROTECTION_LEVEL_PARAMETERS {
+  ULONG ulProtectionType;
+  ULONG ulProtectionLevel;
+  ULONG Reserved;
+  ULONG Reserved2;
+};
+
+struct DXGKMDT_OPM_STANDARD_INFORMATION {
+  DXGKMDT_OPM_RANDOM_NUMBER rnRandomNumber;
+  ULONG ulStatusFlags;
+  ULONG ulInformation;
+  ULONG ulReserved;
+  ULONG ulReserved2;
+};
+
+typedef NTSTATUS(WINAPI* GetSuggestedOPMProtectedOutputArraySizeFunction)(
+    PUNICODE_STRING device_name,
+    DWORD* suggested_output_array_size);
+
+typedef NTSTATUS(WINAPI* CreateOPMProtectedOutputsFunction)(
+    PUNICODE_STRING device_name,
+    DXGKMDT_OPM_VIDEO_OUTPUT_SEMANTICS vos,
+    DWORD output_array_size,
+    DWORD* num_in_output_array,
+    OPM_PROTECTED_OUTPUT_HANDLE* output_array);
+
+typedef NTSTATUS(WINAPI* GetCertificateFunction)(
+    PUNICODE_STRING device_name,
+    DXGKMDT_CERTIFICATE_TYPE certificate_type,
+    BYTE* certificate,
+    ULONG certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateSizeFunction)(
+    PUNICODE_STRING device_name,
+    DXGKMDT_CERTIFICATE_TYPE certificate_type,
+    ULONG* certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateByHandleFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    DXGKMDT_CERTIFICATE_TYPE certificate_type,
+    BYTE* certificate,
+    ULONG certificate_length);
+
+typedef NTSTATUS(WINAPI* GetCertificateSizeByHandleFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    DXGKMDT_CERTIFICATE_TYPE certificate_type,
+    ULONG* certificate_length);
+
+typedef NTSTATUS(WINAPI* DestroyOPMProtectedOutputFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output);
+
+typedef NTSTATUS(WINAPI* ConfigureOPMProtectedOutputFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    const DXGKMDT_OPM_CONFIGURE_PARAMETERS* parameters,
+    ULONG additional_parameters_size,
+    const BYTE* additional_parameters);
+
+typedef NTSTATUS(WINAPI* GetOPMInformationFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    const DXGKMDT_OPM_GET_INFO_PARAMETERS* parameters,
+    DXGKMDT_OPM_REQUESTED_INFORMATION* requested_information);
+
+typedef NTSTATUS(WINAPI* GetOPMRandomNumberFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    DXGKMDT_OPM_RANDOM_NUMBER* random_number);
+
+typedef NTSTATUS(WINAPI* SetOPMSigningKeyAndSequenceNumbersFunction)(
+    OPM_PROTECTED_OUTPUT_HANDLE protected_output,
+    const DXGKMDT_OPM_ENCRYPTED_PARAMETERS* parameters);
+
 #endif  // SANDBOX_WIN_SRC_NT_INTERNALS_H__
 
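The new typedefs above are plain function-pointer types; callers are expected to resolve the corresponding exports at runtime. A hedged sketch of that pattern using the well-known RtlNtStatusToDosError export from ntdll.dll (the OPM entry points would be resolved the same way from whichever module exports them):

#include <windows.h>
#include <winternl.h>  // NTSTATUS

#include <cstdio>

// Same shape as the typedef added to nt_internals.h above.
typedef ULONG(WINAPI* RtlNtStatusToDosErrorFunction)(NTSTATUS status);

int main() {
  HMODULE ntdll = ::GetModuleHandleW(L"ntdll.dll");
  if (!ntdll)
    return 1;
  RtlNtStatusToDosErrorFunction to_dos_error =
      reinterpret_cast<RtlNtStatusToDosErrorFunction>(
          ::GetProcAddress(ntdll, "RtlNtStatusToDosError"));
  if (!to_dos_error)
    return 1;
  const NTSTATUS kStatusNotSupported = static_cast<NTSTATUS>(0xC00000BBL);
  std::printf("Win32 error for STATUS_NOT_SUPPORTED: %lu\n",
              to_dos_error(kStatusNotSupported));
  return 0;
}
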
diff --git a/sandbox/win/src/sandbox_policy.h b/sandbox/win/src/sandbox_policy.h
index df76c36..c0916ea 100644
--- a/sandbox/win/src/sandbox_policy.h
+++ b/sandbox/win/src/sandbox_policy.h
@@ -26,7 +26,6 @@
     SUBSYS_PROCESS,           // Creation of child processes.
     SUBSYS_REGISTRY,          // Creation and opening of registry keys.
     SUBSYS_SYNC,              // Creation of named sync objects.
-    SUBSYS_HANDLES,           // Duplication of handles to other processes.
     SUBSYS_WIN32K_LOCKDOWN    // Win32K Lockdown related policy.
   };
 
@@ -38,9 +37,6 @@
     FILES_ALLOW_QUERY,     // Allows access to query the attributes of a file.
     FILES_ALLOW_DIR_ANY,   // Allows open or create with directory semantics
                            // only.
-    HANDLES_DUP_ANY,       // Allows duplicating handles opened with any
-                           // access permissions.
-    HANDLES_DUP_BROKER,    // Allows duplicating handles to the broker process.
     NAMEDPIPES_ALLOW_ANY,  // Allows creation of a named pipe.
     PROCESS_MIN_EXEC,      // Allows to create a process with minimal rights
                            // over the resulting process and thread handles.
@@ -54,9 +50,12 @@
     EVENTS_ALLOW_READONLY,  // Allows opening an event with synchronize access.
     REG_ALLOW_READONLY,     // Allows readonly access to a registry key.
     REG_ALLOW_ANY,          // Allows read and write access to a registry key.
-    FAKE_USER_GDI_INIT      // Fakes user32 and gdi32 initialization. This can
+    FAKE_USER_GDI_INIT,     // Fakes user32 and gdi32 initialization. This can
                             // be used to allow the DLLs to load and initialize
                             // even if the process cannot access that subsystem.
+    IMPLEMENT_OPM_APIS      // Implements FAKE_USER_GDI_INIT and also exposes
+                            // IPC calls to handle Output Protection Manager
+                            // APIs.
   };
 
   // Increments the reference count of this object. The reference count must
@@ -173,17 +172,6 @@
   // than the current level, the sandbox will fail to start.
   virtual ResultCode SetDelayedIntegrityLevel(IntegrityLevel level) = 0;
 
-  // Sets the AppContainer to be used for the sandboxed process. Any capability
-  // to be enabled for the process should be added before this method is invoked
-  // (by calling SetCapability() as many times as needed).
-  // The desired AppContainer must be already installed on the system, otherwise
-  // launching the sandboxed process will fail. See BrokerServices for details
-  // about installing an AppContainer.
-  // Note that currently Windows restricts the use of impersonation within
-  // AppContainers, so this function is incompatible with the use of an initial
-  // token.
-  virtual ResultCode SetAppContainer(const wchar_t* sid) = 0;
-
   // Sets a capability to be enabled for the sandboxed process' AppContainer.
   virtual ResultCode SetCapability(const wchar_t* sid) = 0;
 
@@ -254,6 +242,16 @@
   // Adds a handle that will be shared with the target process. Does not take
   // ownership of the handle.
   virtual void AddHandleToShare(HANDLE handle) = 0;
+
+  // Locks down the default DACL of the created lockdown and initial tokens
+  // to restrict which other processes may access the sandboxed process's
+  // kernel resources.
+  virtual void SetLockdownDefaultDacl() = 0;
+
+  // Enable OPM API redirection when in Win32k lockdown.
+  virtual void SetEnableOPMRedirection() = 0;
+  // Returns whether OPM API redirection is enabled when in Win32k lockdown.
+  virtual bool GetEnableOPMRedirection() = 0;
 };
 
 }  // namespace sandbox
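
A hedged sketch of how a broker might combine the new IMPLEMENT_OPM_APIS semantics with SetEnableOPMRedirection(); the helper name is invented and the real call sites in Chromium may differ:

#include "sandbox/win/src/sandbox_policy.h"

void EnableWin32kLockdownWithOpm(sandbox::TargetPolicy* policy) {
  // Win32k lockdown rule: fake user32/gdi32 initialization and additionally
  // service Output Protection Manager calls over IPC (IMPLEMENT_OPM_APIS).
  // The ResultCode return value is ignored here for brevity.
  policy->AddRule(sandbox::TargetPolicy::SUBSYS_WIN32K_LOCKDOWN,
                  sandbox::TargetPolicy::IMPLEMENT_OPM_APIS, nullptr);
  // Route the intercepted OPM APIs to the broker instead of failing them
  // in-process.
  policy->SetEnableOPMRedirection();
}
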
diff --git a/sandbox/win/src/sandbox_types.h b/sandbox/win/src/sandbox_types.h
index b749b9c..919086a 100644
--- a/sandbox/win/src/sandbox_types.h
+++ b/sandbox/win/src/sandbox_types.h
@@ -5,10 +5,16 @@
 #ifndef SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
 #define SANDBOX_WIN_SRC_SANDBOX_TYPES_H_
 
+#include "base/process/launch.h"
+
 namespace sandbox {
 
 // Operation result codes returned by the sandbox API.
-enum ResultCode {
+//
+// Note: These codes are recorded in a histogram, so new codes must be appended
+// at the end and existing values must never be renumbered.
+//
+enum ResultCode : int {
   SBOX_ALL_OK = 0,
   // Error is originating on the win32 layer. Call GetlastError() for more
   // information.
@@ -47,6 +53,56 @@
   SBOX_ERROR_PROC_THREAD_ATTRIBUTES = 17,
   // Error in creating process.
   SBOX_ERROR_CREATE_PROCESS = 18,
+  // Failure calling delegate PreSpawnTarget.
+  SBOX_ERROR_DELEGATE_PRE_SPAWN = 19,
+  // Could not assign process to job object.
+  SBOX_ERROR_ASSIGN_PROCESS_TO_JOB_OBJECT = 20,
+  // Could not set the token on the target's initial thread.
+  SBOX_ERROR_SET_THREAD_TOKEN = 21,
+  // Could not get thread context of new process.
+  SBOX_ERROR_GET_THREAD_CONTEXT = 22,
+  // Could not duplicate target info of new process.
+  SBOX_ERROR_DUPLICATE_TARGET_INFO = 23,
+  // Could not set low box token.
+  SBOX_ERROR_SET_LOW_BOX_TOKEN = 24,
+  // Could not create file mapping for IPC dispatcher.
+  SBOX_ERROR_CREATE_FILE_MAPPING = 25,
+  // Could not duplicate shared section into target process for IPC dispatcher.
+  SBOX_ERROR_DUPLICATE_SHARED_SECTION = 26,
+  // Could not map view of shared memory in broker.
+  SBOX_ERROR_MAP_VIEW_OF_SHARED_SECTION = 27,
+  // Could not apply ASLR mitigations to target process.
+  SBOX_ERROR_APPLY_ASLR_MITIGATIONS = 28,
+  // Could not setup one of the required interception services.
+  SBOX_ERROR_SETUP_BASIC_INTERCEPTIONS = 29,
+  // Could not setup basic interceptions.
+  SBOX_ERROR_SETUP_INTERCEPTION_SERVICE = 30,
+  // Could not initialize interceptions. This usually means 3rd party software
+  // is stomping on our hooks, or can sometimes mean the syscall format has
+  // changed.
+  SBOX_ERROR_INITIALIZE_INTERCEPTIONS = 31,
+  // Could not setup the imports for ntdll in target process.
+  SBOX_ERROR_SETUP_NTDLL_IMPORTS = 32,
+  // Could not setup the handle closer in target process.
+  SBOX_ERROR_SETUP_HANDLE_CLOSER = 33,
+  // Cannot get the current Window Station.
+  SBOX_ERROR_CANNOT_GET_WINSTATION = 34,
+  // Cannot query the security attributes of the current Window Station.
+  SBOX_ERROR_CANNOT_QUERY_WINSTATION_SECURITY = 35,
+  // Cannot get the current Desktop.
+  SBOX_ERROR_CANNOT_GET_DESKTOP = 36,
+  // Cannot query the security attributes of the current Desktop.
+  SBOX_ERROR_CANNOT_QUERY_DESKTOP_SECURITY = 37,
+  // Cannot setup the interception manager config buffer.
+  SBOX_ERROR_CANNOT_SETUP_INTERCEPTION_CONFIG_BUFFER = 38,
+  // Cannot copy data to the child process.
+  SBOX_ERROR_CANNOT_COPY_DATA_TO_CHILD = 39,
+  // Cannot setup the interception thunk.
+  SBOX_ERROR_CANNOT_SETUP_INTERCEPTION_THUNK = 40,
+  // Cannot resolve the interception thunk.
+  SBOX_ERROR_CANNOT_RESOLVE_INTERCEPTION_THUNK = 41,
+  // Cannot write interception thunk to child process.
+  SBOX_ERROR_CANNOT_WRITE_INTERCEPTION_THUNK = 42,
   // Placeholder for last item of the enum.
   SBOX_ERROR_LAST
 };
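
Because ResultCode now feeds a histogram, callers record it with an enumeration macro bounded by SBOX_ERROR_LAST. A hedged sketch (the histogram name is invented for illustration):

#include "base/metrics/histogram_macros.h"
#include "sandbox/win/src/sandbox_types.h"

void RecordSandboxLaunchResult(sandbox::ResultCode code) {
  // SBOX_ERROR_LAST acts as the exclusive boundary, which is why new codes
  // must only ever be appended before it.
  UMA_HISTOGRAM_ENUMERATION("Example.Sandbox.LaunchResult", code,
                            sandbox::SBOX_ERROR_LAST);
}
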
diff --git a/sandbox/win/wow_helper/wow_helper.vcproj b/sandbox/win/wow_helper/wow_helper.vcproj
index 5482fbd..c8e7c9e 100644
--- a/sandbox/win/wow_helper/wow_helper.vcproj
+++ b/sandbox/win/wow_helper/wow_helper.vcproj
@@ -178,14 +178,6 @@
 	</References>
 	<Files>
 		<Filter
-			Name="base"
-			>
-			<File
-				RelativePath="..\..\base\scoped_ptr.h"
-				>
-			</File>
-		</Filter>
-		<Filter
 			Name="sandbox"
 			>
 			<File
diff --git a/third_party/libevent/event.h b/third_party/libevent/event.h
deleted file mode 100644
index 4a91e4b..0000000
--- a/third_party/libevent/event.h
+++ /dev/null
@@ -1,4 +0,0 @@
-// The Chromium build contains its own checkout of libevent. This stub is used
-// when building the Chrome OS libchrome package to instead use the system
-// headers.
-#include <event.h>
diff --git a/third_party/valgrind/memcheck.h b/third_party/valgrind/memcheck.h
deleted file mode 100644
index 3cd08a9..0000000
--- a/third_party/valgrind/memcheck.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifdef ANDROID
-  #include "memcheck/memcheck.h"
-#else
-  // On Chrome OS, these files will be added in a patch applied in the ebuild.
-  #include <base/third_party/valgrind/memcheck.h>
-#endif
diff --git a/third_party/valgrind/valgrind.h b/third_party/valgrind/valgrind.h
deleted file mode 100644
index 779ef98..0000000
--- a/third_party/valgrind/valgrind.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifdef ANDROID
-  #include "include/valgrind.h"
-#else
-  // These files will be added in a patch applied in the ebuild.
-  #include <base/third_party/valgrind/valgrind.h>
-#endif